From 5a483192176679db777ce50f9c30b993e1588e41 Mon Sep 17 00:00:00 2001 From: OmarSourman Date: Wed, 8 Oct 2025 00:07:42 -0400 Subject: [PATCH] black edits, fixed py2 issues print '...' manually --- diffpy/__init__.py | 3 +- diffpy/confutils/config.py | 620 ++++--- diffpy/confutils/tools.py | 152 +- diffpy/confutils/version.py | 11 +- diffpy/srxplanar/calculate.py | 258 +-- diffpy/srxplanar/loadimage.py | 82 +- diffpy/srxplanar/mask.py | 532 +++--- diffpy/srxplanar/saveresults.py | 157 +- diffpy/srxplanar/selfcalibrate.py | 403 +++-- diffpy/srxplanar/srxplanar.py | 226 ++- diffpy/srxplanar/srxplanarconfig.py | 740 +++++--- diffpy/srxplanar/tifffile.py | 2602 ++++++++++++++++----------- diffpy/srxplanar/version.py | 11 +- doc/manual/source/conf.py | 160 +- pyproject.toml | 22 + setup.py | 106 +- 16 files changed, 3606 insertions(+), 2479 deletions(-) create mode 100644 pyproject.toml diff --git a/diffpy/__init__.py b/diffpy/__init__.py index 794cadc..31be639 100644 --- a/diffpy/__init__.py +++ b/diffpy/__init__.py @@ -13,5 +13,4 @@ # ############################################################################## -__import__('pkg_resources').declare_namespace(__name__) - +__import__("pkg_resources").declare_namespace(__name__) diff --git a/diffpy/confutils/config.py b/diffpy/confutils/config.py index 2fcc9a4..49653cf 100644 --- a/diffpy/confutils/config.py +++ b/diffpy/confutils/config.py @@ -13,14 +13,14 @@ # ############################################################################## -''' +""" package for organizing program configurations. It can read/write configurations file, parse arguments from command lines, and also parse arguments passed from method/function calling inside python. 
- + Note: for python 2.6, argparse and orderedDict is required, install them with easy_install -''' +""" import ConfigParser @@ -29,31 +29,41 @@ import sys from functools import partial import argparse + try: from collections import OrderedDict except: from ordereddict import OrderedDict -from diffpy.confutils.tools import _configPropertyRad, _configPropertyR, \ - _configPropertyRW, str2bool, opt2Str, str2Opt, StrConv, FakeConfigFile +from diffpy.confutils.tools import ( + _configPropertyRad, + _configPropertyR, + _configPropertyRW, + str2bool, + opt2Str, + str2Opt, + StrConv, + FakeConfigFile, +) + class ConfigBase(object): - ''' + """ _optdatalist_default, _optdatalist are metadata used to initialize the options, see below for examples - + options presents in --help (in cmd), config file, headers have same order as in these list, so arrange them in right order here. - + optional args to control if the options presents in args, config file or file header - + 'args' - default is 'a' if 'a', this option will be available in self.args if 'n', this option will not be available in self.args 'config' - default is 'a' if 'f', this option will present in self.config and be written to - config file only in full mode + config file only in full mode if 'a', this option will present in self.config and be written to config file both in full and short mode if 'n', this option will not present in self.config @@ -62,21 +72,19 @@ class ConfigBase(object): if 'a', this option will be written to header both in full and short mode if 'n', this option will not be written to header - + so in short mode, all options with 'a' will be written, in full mode, all options with 'a' or 'f' will be written - ''' + """ # Text to display before the argument help - _description = \ - '''Description of configurations - ''' + _description = """Description of configurations + """ # Text to display after the argument help - _epilog = \ - ''' - ''' + _epilog = """ + """ - ''' + """ optdata contains 
these keys: these args will be passed to argparse, see the documents of argparse for detail information @@ -92,100 +100,171 @@ class ConfigBase(object): 'r': required 'de': dest 'co': const - ''' - _optdatanamedict = {'h':'help', - 't':'type', - 'a':'action', - 'n':'nargs', - 'd':'default', - 'c':'choices', - 'r':'required', - 'de':'dest', - 'co':'const'} + """ + _optdatanamedict = { + "h": "help", + "t": "type", + "a": "action", + "n": "nargs", + "d": "default", + "c": "choices", + "r": "required", + "de": "dest", + "co": "const", + } # examples, overload it _optdatalist_default = [ - ['configfile', {'sec':'Control', 'config':'f', 'header':'n', - 's':'c', - 'h':'name of input config file', - 'd':'', }], - ['createconfig', {'sec':'Control', 'config':'n', 'header':'n', - 'h':'create a config file according to default or current values', - 'd':'', }], - ['createconfigfull', {'sec':'Control', 'config':'n', 'header':'n', - 'h':'create a full configurable config file', - 'd':'', }], - ] + [ + "configfile", + { + "sec": "Control", + "config": "f", + "header": "n", + "s": "c", + "h": "name of input config file", + "d": "", + }, + ], + [ + "createconfig", + { + "sec": "Control", + "config": "n", + "header": "n", + "h": "create a config file according to default or current values", + "d": "", + }, + ], + [ + "createconfigfull", + { + "sec": "Control", + "config": "n", + "header": "n", + "h": "create a full configurable config file", + "d": "", + }, + ], + ] # examples, overload it _optdatalist = [ - ['tifdirectory', {'sec':'Experiment', 'header':'n', - 's':'tifdir', - 'h':'directory of raw tif files', - 'd':'currentdir', }], - ['integrationspace', {'sec':'Experiment', - 'h':'integration space, could be twotheta or qspace', - 'd':'twotheta', - 'c':['twotheta', 'qspace'], }], - ['wavelength', {'sec':'Experiment', - 'h':'wavelength of x-ray, in A', - 'd':0.1000, }], - ['rotationd', {'sec':'Experiment', - 's':'rot', - 'h':'rotation angle of tilt plane, in degree', - 'd':0.0, 
}], - ['includepattern', {'sec':'Beamline', - 's':'ipattern', - 'h':'file name pattern for included files', - 'n':'*', - 'd':['*.tif'], }], - ['excludepattern', {'sec':'Beamline', - 's':'epattern', - 'h':'file name pattern for excluded files', - 'n':'*', - 'd':['*.dark.tif', '*.raw.tif'], }], - ['fliphorizontal', {'sec':'Beamline', - 'h':'filp the image horizontally', - 'n':'?', - 'co':True, - 'd':False, }], - ['regulartmatrixenable', {'sec':'Others', - 'h':'normalize tmatrix in splitting method', - 'n':'?', - 'co':True, - 'd':False, }], - ['maskedges', {'sec':'Others', 'config':'f', 'header':'f', - 'h':'mask the edge pixels, first four means the number of pixels masked in each edge \ - (left, right, top, bottom), the last one is the radius of a region masked around the corner', - 'n':5, - 'd':[1, 1, 1, 1, 50], }], - ] + [ + "tifdirectory", + { + "sec": "Experiment", + "header": "n", + "s": "tifdir", + "h": "directory of raw tif files", + "d": "currentdir", + }, + ], + [ + "integrationspace", + { + "sec": "Experiment", + "h": "integration space, could be twotheta or qspace", + "d": "twotheta", + "c": ["twotheta", "qspace"], + }, + ], + [ + "wavelength", + { + "sec": "Experiment", + "h": "wavelength of x-ray, in A", + "d": 0.1000, + }, + ], + [ + "rotationd", + { + "sec": "Experiment", + "s": "rot", + "h": "rotation angle of tilt plane, in degree", + "d": 0.0, + }, + ], + [ + "includepattern", + { + "sec": "Beamline", + "s": "ipattern", + "h": "file name pattern for included files", + "n": "*", + "d": ["*.tif"], + }, + ], + [ + "excludepattern", + { + "sec": "Beamline", + "s": "epattern", + "h": "file name pattern for excluded files", + "n": "*", + "d": ["*.dark.tif", "*.raw.tif"], + }, + ], + [ + "fliphorizontal", + { + "sec": "Beamline", + "h": "filp the image horizontally", + "n": "?", + "co": True, + "d": False, + }, + ], + [ + "regulartmatrixenable", + { + "sec": "Others", + "h": "normalize tmatrix in splitting method", + "n": "?", + "co": True, + "d": False, + 
}, + ], + [ + "maskedges", + { + "sec": "Others", + "config": "f", + "header": "f", + "h": "mask the edge pixels, first four means the number of pixels masked in each edge \ + (left, right, top, bottom), the last one is the radius of a region masked around the corner", + "n": 5, + "d": [1, 1, 1, 1, 50], + }, + ], + ] # some default data # configfile: default config file name # headertitle: default title of header - _defaultdata = {'configfile': ['config.cfg'], - 'headertitle': 'Configuration information' - } - + _defaultdata = { + "configfile": ["config.cfg"], + "headertitle": "Configuration information", + } def __init__(self, filename=None, args=None, **kwargs): - ''' - init the class and update the values of options if specified in + """ + init the class and update the values of options if specified in filename/args/kwargs - + it will: 1. call self._preInit method 2. find the config file if specified in filename/args/kwargs if failed, try to find default config file 3. update the options value using filename/args/kwargs file > args > kwargs - + :param filename: str, file name of the config file :param args: list of str, args passed from cmd :param kwargs: dict, optional kwargs - + :return: None - ''' + """ # call self._preInit self._preInit(**kwargs) @@ -196,11 +275,11 @@ def __init__(self, filename=None, args=None, **kwargs): # example, overload it def _preInit(self, **kwargs): - ''' + """ method called in init process, overload it! 
- + this method will be called before reading config from file/args/kwargs - ''' + """ # for name in ['rotation']: # setattr(self.__class__, name, _configPropertyRad(name+'d')) # self._configlist['Experiment'].extend(['rotation']) @@ -209,62 +288,62 @@ def _preInit(self, **kwargs): ########################################################################### def _findConfigFile(self, filename=None, args=None, **kwargs): - ''' + """ find config file, if any config is specified in filename/args/kwargs - then return the filename of config. - + then return the filename of config. + :param filename: str, file name of config file :param filename: list of str, args passed from cmd :param kwargs: optional kwargs - - :return: name of config file if found, otherwise None - ''' + + :return: name of config file if found, otherwise None + """ rv = None - if (filename != None): + if filename != None: rv = filename - if (args != None): - if ('--configfile' in args) or ('-c' in args): + if args != None: + if ("--configfile" in args) or ("-c" in args): obj = self.args.parse_args(args) rv = obj.configfile - if kwargs.has_key('configfile'): - rv = kwargs['configfile'] + if kwargs.has_key("configfile"): + rv = kwargs["configfile"] return rv def _findDefaultConfigFile(self, filename=None, args=None, **kwargs): - ''' + """ find default config file, if any config is specified in filename/args/kwargs or in self._defaultdata['configfile'], then return the filename of config. 
- + kwargs > args > filename > default - + param filename: str, file name of config file param filename: list of str, args passed from cmd param kwargs: optional kwargs - - return: name of config file if found, otherwise None - ''' + + return: name of config file if found, otherwise None + """ rv = self._findConfigFile(filename, args, **kwargs) if rv == None: - for dconf in self._defaultdata['configfile']: - if (os.path.exists(dconf))and(rv == None): + for dconf in self._defaultdata["configfile"]: + if (os.path.exists(dconf)) and (rv == None): rv = dconf return rv ########################################################################### def _updateSelf(self, optnames=None, **kwargs): - ''' + """ update the options value, then copy the values in the self.'options' to self.config - + 1. call self._preUpdateSelf 2. apply options' value from *self.option* to self.config 3. call self._postUpdateSelf - + :param optnames: str or list of str, name of options whose value has been changed, if None, update all options - ''' + """ # so some check right here self._preUpdateSelf(**kwargs) # copy value to self.config @@ -275,90 +354,90 @@ def _updateSelf(self, optnames=None, **kwargs): # example, overload it def _preUpdateSelf(self, **kwargs): - ''' + """ additional process called in self._updateSelf, this method is called before self._copySelftoConfig(), i.e. before copy options value to self.config (config file) - ''' + """ return def _postUpdateSelf(self, **kwargs): - ''' + """ additional process called in self._updateSelf, this method is called after self._copySelftoConfig(), i.e. 
before copy options value to self.config (config file) - ''' + """ return ########################################################################### def _getTypeStr(self, optname): - ''' + """ return the type of option - + :param optname: str, name of option - - :return: string, type of the option - ''' + + :return: string, type of the option + """ opttype = self._getTypeStrC(optname) return opttype @classmethod def _getTypeStrC(cls, optname): - ''' + """ class method, return the type of option first try to get type information from metadata, if failed, try - to get type from default value - + to get type from default value + :param optname: str, name of option - - :return: string, type of the option - ''' + + :return: string, type of the option + """ optdata = cls._optdata[optname] - if optdata.has_key('t'): - opttype = optdata['t'] + if optdata.has_key("t"): + opttype = optdata["t"] else: - value = optdata['d'] + value = optdata["d"] if isinstance(value, str): - opttype = 'str' + opttype = "str" elif isinstance(value, bool): - opttype = 'bool' + opttype = "bool" elif isinstance(value, float): - opttype = 'float' + opttype = "float" elif isinstance(value, int): - opttype = 'int' + opttype = "int" elif isinstance(value, list): if len(value) == 0: - opttype = 'strlist' + opttype = "strlist" elif isinstance(value[0], str): - opttype = 'strlist' + opttype = "strlist" elif isinstance(value[0], bool): - opttype = 'boollist' + opttype = "boollist" elif isinstance(value[0], float): - opttype = 'floatlist' + opttype = "floatlist" elif isinstance(value[0], int): - opttype = 'intlist' + opttype = "intlist" return opttype ########################################################################### def _detectAddSections(self): - ''' + """ detect sections present in self._optdata and add them to self.config also add it to self._configlist - ''' + """ self._detectAddSectionsC(self) return @classmethod def _detectAddSectionsC(cls): - ''' + """ class method, detect sections 
present in self._optdata and add them to self.config also add it to self._configlist - ''' + """ # seclist = [self._optdata[key]['sec'] for key in self._optdata.keys()] - seclist = [cls._optdata[opt[0]]['sec'] for opt in cls._optdatalist] + seclist = [cls._optdata[opt[0]]["sec"] for opt in cls._optdatalist] secdict = OrderedDict.fromkeys(seclist) # for sec in set(seclist): for sec in secdict.keys(): @@ -367,75 +446,81 @@ class method, detect sections present in self._optdata and add them to self.conf return def _addOpt(self, optname): - ''' + """ add options to self.config and self.args and self.*option*, this will read metadata from self._optdatalist - + :param optname: string, name of option - ''' + """ self._addOptC(self, optname) return @classmethod def _addOptC(cls, optname): - ''' + """ Class method, add options to self.config and self.args and self.*option*, this will read metadata in self._optdatalist - + :param optname: string, name of option - ''' + """ optdata = cls._optdata[optname] opttype = cls._getTypeStrC(optname) # replace currentdir in default to os.getcwd() - if optdata['d'] == 'currentdir': - optdata['d'] = os.getcwd() + if optdata["d"] == "currentdir": + optdata["d"] = os.getcwd() # add to cls.'optname' cls._addOptSelfC(optname, optdata) # add to cls.config - secname = optdata['sec'] if optdata.has_key('sec') else 'Others' + secname = optdata["sec"] if optdata.has_key("sec") else "Others" cls._configlist[secname].append(optname) - if optdata.get('config', 'a') != 'n': - strvalue = ', '.join(map(str, optdata['d'])) if isinstance(optdata['d'], list) else str(optdata['d']) + if optdata.get("config", "a") != "n": + strvalue = ( + ", ".join(map(str, optdata["d"])) + if isinstance(optdata["d"], list) + else str(optdata["d"]) + ) cls.config.set(secname, optname, strvalue) # add to cls.args - if optdata.get('args', 'a') != 'n': + if optdata.get("args", "a") != "n": # transform optdata to a dict that can pass to add_argument method pargs = dict() for 
key in optdata.keys(): if cls._optdatanamedict.has_key(key): pargs[cls._optdatanamedict[key]] = optdata[key] - pargs['default'] = argparse.SUPPRESS - pargs['type'] = StrConv(opttype) + pargs["default"] = argparse.SUPPRESS + pargs["type"] = StrConv(opttype) # add args - if optdata.has_key('f'): + if optdata.has_key("f"): cls.args.add_argument(optname, **pargs) - elif optdata.has_key('s'): - cls.args.add_argument('--' + optname, '-' + optdata['s'], **pargs) + elif optdata.has_key("s"): + cls.args.add_argument( + "--" + optname, "-" + optdata["s"], **pargs + ) else: - cls.args.add_argument('--' + optname, **pargs) + cls.args.add_argument("--" + optname, **pargs) return @classmethod def _addOptSelfC(cls, optname, optdata): - ''' + """ class method, assign options value to *self.option*, using metadata - + :param optname: string, name of the option :param optdata: dict, metadata of the options, get it from self._optdatalist - ''' - setattr(cls, optname, optdata['d']) + """ + setattr(cls, optname, optdata["d"]) return def _copyConfigtoSelf(self, optnames=None): - ''' + """ copy the options' value from self.config to self.*option* - + :param optnames: str or list of str, names of options whose value copied from self.config to self.*option*'. Set None to update all - ''' + """ if optnames != None: optnames = optnames if isinstance(optnames, list) else [optnames] else: @@ -445,19 +530,19 @@ def _copyConfigtoSelf(self, optnames=None): for optname in optnames: if self._optdata.has_key(optname): - secname = self._optdata[optname]['sec'] + secname = self._optdata[optname]["sec"] opttype = self._getTypeStr(optname) optvalue = self.config.get(secname, optname) setattr(self, optname, str2Opt(opttype, optvalue)) return def _copySelftoConfig(self, optnames=None): - ''' + """ copy the value from self.*option* to self.config - + :param optname: str or list of str, names of options whose value copied from self.*option* to self.config. 
Set None to update all - ''' + """ if optnames != None: optnames = optnames if isinstance(optnames, list) else [optnames] else: @@ -467,7 +552,7 @@ def _copySelftoConfig(self, optnames=None): for optname in optnames: if self._optdata.has_key(optname): - secname = self._optdata[optname]['sec'] + secname = self._optdata[optname]["sec"] opttype = self._getTypeStr(optname) optvalue = getattr(self, optname) self.config.set(secname, optname, opt2Str(opttype, optvalue)) @@ -476,12 +561,12 @@ def _copySelftoConfig(self, optnames=None): ########################################################################### def parseArgs(self, pargs): - ''' + """ parse args and update the value in self.*option*, this will call the self.args() to parse args, - + :param pargs: list of string, arguments to parse, usually comming from sys.argv - ''' + """ obj = self.args.parse_args(pargs) changedargs = obj.__dict__.keys() for optname in changedargs: @@ -493,11 +578,11 @@ def parseArgs(self, pargs): return obj def parseKwargs(self, **kwargs): - ''' + """ update self.*option* values according to the kwargs - + :param kwargs: dict, keywords=value - ''' + """ if kwargs != {}: changedargs = [] for optname, optvalue in kwargs.iteritems(): @@ -509,11 +594,11 @@ def parseKwargs(self, **kwargs): return def parseConfigFile(self, filename): - ''' + """ read a config file and update the self.*option* - + :param filename: str, file name of config file (include path) - ''' + """ if filename != None: filename = os.path.abspath(filename) if os.path.exists(filename): @@ -527,21 +612,21 @@ def parseConfigFile(self, filename): return def updateConfig(self, filename=None, args=None, **kwargs): - ''' + """ update config according to config file, args(from sys.argv) or **kwargs - + 1. call self._preUpdateConfig() - 2. process file/args/kwargs passed to this method, + 2. process file/args/kwargs passed to this method, 3. read a configfile if specified in args or kwargs 4. call self._postUpdateConfig() 5. 
write config file if specified in args/kwargs - + :param filename: str, file name of the config file - :param args: list of str, args passed from cmd, + :param args: list of str, args passed from cmd, :param kwargs: dict, optional kwargs - + :return: True if anything updated, False if nothing updated - ''' + """ # call self._preUpdateConfig self._preUpdateConfig(**kwargs) @@ -553,7 +638,11 @@ def updateConfig(self, filename=None, args=None, **kwargs): if kwargs != {}: rv = self.parseKwargs(**kwargs) - if (filename == None)and((args == None)or(args == []))and(kwargs == {}): + if ( + (filename == None) + and ((args == None) or (args == [])) + and (kwargs == {}) + ): rv = self._updateSelf() # call self._callbackUpdateConfig @@ -564,87 +653,111 @@ def updateConfig(self, filename=None, args=None, **kwargs): return rv def _preUpdateConfig(self, **kwargs): - ''' + """ Method called before parsing args or kwargs or config file, in self.updateConfig - ''' + """ return def _postUpdateConfig(self, **kwargs): - ''' + """ Method called after parsing args or kwargs or config file, in self.updateConfig - ''' + """ return ########################################################################### def _createConfigFile(self): - ''' + """ write output config file if specfied in configuration the filename is specified by self.createconfig - ''' - if (self.createconfig != '')and(self.createconfig != None): - self.writeConfig(self.createconfig, 'short') - self.createconfig = '' - if (self.createconfigfull != '')and(self.createconfigfull != None): - self.writeConfig(self.createconfigfull, 'full') - self.createconfigfull = '' + """ + if (self.createconfig != "") and (self.createconfig != None): + self.writeConfig(self.createconfig, "short") + self.createconfig = "" + if (self.createconfigfull != "") and (self.createconfigfull != None): + self.writeConfig(self.createconfigfull, "full") + self.createconfigfull = "" return - def writeConfig(self, filename, mode='short', 
changeconfigfile=True): - ''' + def writeConfig(self, filename, mode="short", changeconfigfile=True): + """ write config to file. the file is compatiable with python package ConfigParser - + :param filename: string, name of file :param mode: string, 'short' or 'full' ('s' or 'f'). in short mode, all options with 'a' will be written, in full mode, all options with 'a' or 'f' will be written - ''' + """ if changeconfigfile: self.configfile = os.path.abspath(filename) self._updateSelf() # func decide if wirte the option to config according to mode # options not present in self._optdata will not be written to config - if mode.startswith('s'): - mcond = lambda optname: self._optdata.get(optname, {'config':'n'}).get('config', 'a') == 'a' + if mode.startswith("s"): + mcond = ( + lambda optname: self._optdata.get( + optname, {"config": "n"} + ).get("config", "a") + == "a" + ) else: - mcond = lambda optname: self._optdata.get(optname, {'config':'n'}).get('config', 'a') != 'n' + mcond = ( + lambda optname: self._optdata.get( + optname, {"config": "n"} + ).get("config", "a") + != "n" + ) lines = [] for section in self.config._sections: tlines = [] - for (key, value) in self.config._sections[section].items(): + for key, value in self.config._sections[section].items(): if (key != "__name__") and mcond(key): - tlines.append("%s = %s" % (key, str(value).replace('\n', '\n\t'))) + tlines.append( + "%s = %s" % (key, str(value).replace("\n", "\n\t")) + ) if len(tlines) > 0: lines.append("[%s]" % section) lines.extend(tlines) - lines.append('') + lines.append("") rv = "\n".join(lines) + "\n" - fp = open(filename, 'w') + fp = open(filename, "w") fp.write(rv) fp.close() return - def getHeader(self, title=None, mode='full'): - ''' - get a header of configurations values, - + def getHeader(self, title=None, mode="full"): + """ + get a header of configurations values, + :param title: str, title of header, if None, try to get it from self.defaultvalue :param mode: string, 'short' or 'full' 
('s' or 'f'). in short mode, all options with 'a' will be written, in full mode, all options with 'a' or 'f' will be written - + :return: string, lines with line break that can be directly writen to a text file - ''' + """ lines = [] - title = '# %s #' % (self._defaultdata['headertitle'] if title == None else title) + title = "# %s #" % ( + self._defaultdata["headertitle"] if title == None else title + ) lines.append(title) # func decide if wirte the option to header according to mode # options not present in self._optdata will not be written to header - if mode.startswith('s'): - mcond = lambda optname: self._optdata.get(optname, {'header':'n'}).get('header', 'a') == 'a' + if mode.startswith("s"): + mcond = ( + lambda optname: self._optdata.get( + optname, {"header": "n"} + ).get("header", "a") + == "a" + ) else: - mcond = lambda optname: self._optdata.get(optname, {'header':'n'}).get('header', 'a') != 'n' + mcond = ( + lambda optname: self._optdata.get( + optname, {"header": "n"} + ).get("header", "a") + != "n" + ) for secname in self._configlist.keys(): tlines = [] @@ -652,46 +765,52 @@ def getHeader(self, title=None, mode='full'): if mcond(optname): value = getattr(self, optname) ttype = self._getTypeStr(optname) - strvalue = ', '.join(map(str, value)) if ttype.endswith('list') else str(value) + strvalue = ( + ", ".join(map(str, value)) + if ttype.endswith("list") + else str(value) + ) tlines.append("%s = %s" % (optname, strvalue)) if len(tlines) > 0: lines.append("[%s]" % secname) lines.extend(tlines) - lines.append('') + lines.append("") rv = "\n".join(lines) + "\n" return rv def resetDefault(self, optnames=None): - ''' + """ reset all values to their default value - + :param optnames: list of str, name of options to reset, None for all options - ''' + """ if optnames == None: optnames = self._optdata.keys() for optname in optnames: if self._optdata.has_key(optname): - setattr(self, optname, self._optdata[optname]['d']) + setattr(self, optname, 
self._optdata[optname]["d"]) self._updateSelf() return ########################################################################### - #IMPORTANT call this method if you want to add options as class attributes!!! + # IMPORTANT call this method if you want to add options as class attributes!!! @classmethod def initConfigClass(cls): - ''' + """ init config class and add options to class - + IMPORTANT: call this method after you define the metadata of your config class to add options as class attributes!!! - ''' + """ cls._preInitConfigClass() cls.config = ConfigParser.ConfigParser(dict_type=OrderedDict) - cls.args = argparse.ArgumentParser(description=cls._description, - epilog=cls._epilog, - formatter_class=argparse.RawDescriptionHelpFormatter) + cls.args = argparse.ArgumentParser( + description=cls._description, + epilog=cls._epilog, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) cls._configlist = OrderedDict({}) cls._optdatalist = cls._optdatalist_default + cls._optdatalist @@ -706,28 +825,29 @@ class to add options as class attributes!!! @classmethod def _postInitConfigClass(cls): - ''' + """ additional processes called after initConfigClass - + overload it - ''' + """ pass @classmethod def _preInitConfigClass(cls): - ''' + """ additional processes called before initConfigClass - + overload it - ''' + """ pass -#VERY IMPORTANT!!! + +# VERY IMPORTANT!!! 
# add options to class # initConfigClass(ConfigBase) # ConfigBase.initConfigClass() -if __name__ == '__main__': +if __name__ == "__main__": test = ConfigBase() test.updateConfig() diff --git a/diffpy/confutils/tools.py b/diffpy/confutils/tools.py index 3690461..dfc26c9 100644 --- a/diffpy/confutils/tools.py +++ b/diffpy/confutils/tools.py @@ -19,108 +19,123 @@ import zlib import hashlib + def _configPropertyRad(nm): - ''' + """ helper function of options delegation, rad to degree - ''' - rv = property(fget=lambda self: np.radians(getattr(self, nm)), - fset=lambda self, val: setattr(self, nm, np.degrees(val)), - fdel=lambda self: delattr(self, nm)) + """ + rv = property( + fget=lambda self: np.radians(getattr(self, nm)), + fset=lambda self, val: setattr(self, nm, np.degrees(val)), + fdel=lambda self: delattr(self, nm), + ) return rv + def _configPropertyR(name): - ''' + """ Create a property that forwards self.name to self.config.name. - + read only - ''' - rv = property(fget=lambda self: getattr(self.config, name), - doc='attribute forwarded to self.config, read-only') + """ + rv = property( + fget=lambda self: getattr(self.config, name), + doc="attribute forwarded to self.config, read-only", + ) return rv + def _configPropertyRW(name): - ''' + """ Create a property that forwards self.name to self.config.name. 
- + read and write - ''' - rv = property(fget=lambda self: getattr(self.config, nm), - fset=lambda self, value: setattr(self.config, nm, value), - fdel=lambda self: delattr(self, nm), - doc='attribute forwarded to self.config, read/write') + """ + rv = property( + fget=lambda self: getattr(self.config, nm), + fset=lambda self, value: setattr(self.config, nm, value), + fdel=lambda self: delattr(self, nm), + doc="attribute forwarded to self.config, read/write", + ) return rv + def str2bool(v): - ''' + """ turn string to bool - ''' + """ return v.lower() in ("yes", "true", "t", "1") + def opt2Str(opttype, optvalue): - ''' + """ turn the value of one option to string, according to the option type list of values are truned into "value1, value2, value3..." - + :param opttype: string, type of opitons, for example 'str' or 'intlist' :param optvalue: value of the option - + :return: string, usually stored in ConfigBase.config - ''' + """ - if opttype.endswith('list'): - rv = ', '.join(map(str, optvalue)) + if opttype.endswith("list"): + rv = ", ".join(map(str, optvalue)) else: rv = str(optvalue) return rv + def StrConv(opttype): - ''' + """ get the type (a converter function) according to the opttype - + the function doesn't take list - + :param opttype: string, a type of options, could be 'str', 'int', 'float', or 'bool' - - :return: type (converter function) - - ''' - if opttype.startswith('str'): + + :return: type (converter function) + + """ + if opttype.startswith("str"): conv = str - elif opttype.startswith('int'): + elif opttype.startswith("int"): conv = int - elif opttype.startswith('float'): + elif opttype.startswith("float"): conv = float - elif opttype.startswith('bool'): + elif opttype.startswith("bool"): conv = str2bool else: conv = None return conv + def str2Opt(opttype, optvalue): - ''' + """ convert the string to value of one option, according to the option type - + :param opttype: string, type of opitons, for example 'str' or 'intlist' :param optvalue: 
string, value of the option - + :return: value of the option, usually stored in ConfigBase.config - ''' + """ # base converter conv = StrConv(opttype) - if opttype.endswith('list'): - temp = re.split('\s*,\s*', optvalue) + if opttype.endswith("list"): + temp = re.split("\s*,\s*", optvalue) rv = map(conv, temp) if len(temp) > 0 else [] else: rv = conv(optvalue) return rv + class FakeConfigFile(object): - ''' + """ A fake configfile object used in reading config from header of data - or a real config file. - ''' - def __init__(self, configfile, endline='###'): + or a real config file. + """ + + def __init__(self, configfile, endline="###"): self.configfile = configfile self.fp = open(configfile) self.endline = endline @@ -129,36 +144,37 @@ def __init__(self, configfile, endline='###'): return def readline(self): - ''' + """ readline function - ''' + """ line = self.fp.readline() if line.startswith(self.endline) or self.ended: - rv = '' + rv = "" self.ended = True else: rv = line return rv def close(self): - ''' + """ close the file - ''' + """ self.fp.close() return + def checkCRC32(filename): - ''' + """ calculate the crc32 value of file - + :param filename: path to the file - + :return: crc32 value of file - ''' + """ try: - fd = open(filename, 'rb') + fd = open(filename, "rb") except: - return 'Read error' + return "Read error" eachLine = fd.readline() prev = 0 while eachLine: @@ -167,18 +183,19 @@ def checkCRC32(filename): fd.close() return prev + def checkMD5(filename, blocksize=65536): - ''' + """ calculate the MD5 value of file - + :param filename: path to the file - + :return: md5 value of file - ''' + """ try: - fd = open(filename, 'rb') + fd = open(filename, "rb") except: - return 'Read error' + return "Read error" buf = fd.read(blocksize) md5 = hashlib.md5() while len(buf) > 0: @@ -187,14 +204,15 @@ def checkMD5(filename, blocksize=65536): fd.close() return md5.hexdigest() + def checkFileVal(filename): - ''' + """ check file integrity using crc32 and md5. 
It will read file twice then - compare the crc32 and md5. If two results doesn't match, it will wait until + compare the crc32 and md5. If two results doesn't match, it will wait until the file is completed written to disk. - + :param filename: path to the file - ''' + """ valflag = False lastcrc = checkCRC32(filename) while not valflag: diff --git a/diffpy/confutils/version.py b/diffpy/confutils/version.py index 4b9271d..7d4c6ed 100644 --- a/diffpy/confutils/version.py +++ b/diffpy/confutils/version.py @@ -13,19 +13,18 @@ # ############################################################################## -"""Definition of __version__, __date__, __gitsha__. -""" +"""Definition of __version__, __date__, __gitsha__.""" from pkg_resources import resource_stream from ConfigParser import SafeConfigParser # obtain version information from the version.cfg file cp = SafeConfigParser() -cp.readfp(resource_stream(__name__, 'version.cfg')) +cp.readfp(resource_stream(__name__, "version.cfg")) -__version__ = cp.get('DEFAULT', 'version') -__date__ = cp.get('DEFAULT', 'date') -__gitsha__ = cp.get('DEFAULT', 'commit') +__version__ = cp.get("DEFAULT", "version") +__date__ = cp.get("DEFAULT", "date") +__gitsha__ = cp.get("DEFAULT", "commit") del cp diff --git a/diffpy/srxplanar/calculate.py b/diffpy/srxplanar/calculate.py index f3aa395..bc91308 100644 --- a/diffpy/srxplanar/calculate.py +++ b/diffpy/srxplanar/calculate.py @@ -19,37 +19,38 @@ import scipy.ndimage.morphology as snm from diffpy.srxplanar.srxplanarconfig import _configPropertyR + class Calculate(object): - ''' + """ provide methods for integration, variance calculation and distance/Q matrix calculation etc. 
- ''' - # define configuration properties that are forwarded to self.config - xdimension = _configPropertyR('xdimension') - ydimension = _configPropertyR('ydimension') - xpixelsize = _configPropertyR('xpixelsize') - ypixelsize = _configPropertyR('ypixelsize') - xbeamcenter = _configPropertyR('xbeamcenter') - ybeamcenter = _configPropertyR('ybeamcenter') - rotation = _configPropertyR('rotation') - tilt = _configPropertyR('tilt') - distance = _configPropertyR('distance') - wavelength = _configPropertyR('wavelength') - integrationspace = _configPropertyR('integrationspace') - qmax = _configPropertyR('qmax') - qstep = _configPropertyR('qstep') - tthmax = _configPropertyR('tthmax') - tthstep = _configPropertyR('tthstep') - tthmaxd = _configPropertyR('tthmaxd') - tthstepd = _configPropertyR('tthstepd') - tthorqstep = _configPropertyR('tthorqstep') - tthorqmax = _configPropertyR('tthorqmax') - uncertaintyenable = _configPropertyR('uncertaintyenable') - sacorrectionenable = _configPropertyR('sacorrectionenable') - polcorrectionenable = _configPropertyR('polcorrectionenable') - polcorrectf = _configPropertyR('polcorrectf') - cropedges = _configPropertyR('cropedges') - extracrop = _configPropertyR('extracrop') + """ + # define configuration properties that are forwarded to self.config + xdimension = _configPropertyR("xdimension") + ydimension = _configPropertyR("ydimension") + xpixelsize = _configPropertyR("xpixelsize") + ypixelsize = _configPropertyR("ypixelsize") + xbeamcenter = _configPropertyR("xbeamcenter") + ybeamcenter = _configPropertyR("ybeamcenter") + rotation = _configPropertyR("rotation") + tilt = _configPropertyR("tilt") + distance = _configPropertyR("distance") + wavelength = _configPropertyR("wavelength") + integrationspace = _configPropertyR("integrationspace") + qmax = _configPropertyR("qmax") + qstep = _configPropertyR("qstep") + tthmax = _configPropertyR("tthmax") + tthstep = _configPropertyR("tthstep") + tthmaxd = _configPropertyR("tthmaxd") + tthstepd = 
_configPropertyR("tthstepd") + tthorqstep = _configPropertyR("tthorqstep") + tthorqmax = _configPropertyR("tthorqmax") + uncertaintyenable = _configPropertyR("uncertaintyenable") + sacorrectionenable = _configPropertyR("sacorrectionenable") + polcorrectionenable = _configPropertyR("polcorrectionenable") + polcorrectf = _configPropertyR("polcorrectf") + cropedges = _configPropertyR("cropedges") + extracrop = _configPropertyR("extracrop") def __init__(self, p): # create parameter proxy, so that parameters can be accessed by self.parametername in read-only mode @@ -58,55 +59,64 @@ def __init__(self, p): return def prepareCalculation(self): - ''' + """ prepare data for calculation - ''' + """ self.xydimension = self.xdimension * self.ydimension - self.xr = (np.arange(self.xdimension, dtype=float) - self.xbeamcenter + 0.5) * self.xpixelsize - self.yr = (np.arange(self.ydimension, dtype=float) - self.ybeamcenter + 0.5) * self.ypixelsize - - self.xr = self.xr[self.cropedges[0]:-self.cropedges[1]] - self.yr = self.yr[self.cropedges[2]:-self.cropedges[3]] - + self.xr = ( + np.arange(self.xdimension, dtype=float) - self.xbeamcenter + 0.5 + ) * self.xpixelsize + self.yr = ( + np.arange(self.ydimension, dtype=float) - self.ybeamcenter + 0.5 + ) * self.ypixelsize + + self.xr = self.xr[self.cropedges[0] : -self.cropedges[1]] + self.yr = self.yr[self.cropedges[2] : -self.cropedges[3]] + self.dmatrix = self.genDistanceMatrix() - self.azimuthmatrix = np.arctan2(self.yr.reshape(len(self.yr), 1), - self.xr.reshape(1, len(self.xr))) + self.azimuthmatrix = np.arctan2( + self.yr.reshape(len(self.yr), 1), self.xr.reshape(1, len(self.xr)) + ) self.genTTHorQMatrix() self.perviousmaskedmatrix = np.zeros(4) return def genTTHorQMatrix(self): - ''' + """ generate a twotheta matrix or q matrix which stores the tth or q value or each pixel - ''' + """ # set tth or q grid - if self.integrationspace == 'twotheta': - self.bin_edges = np.r_[0, np.arange(self.tthstep / 2, self.tthmax, self.tthstep)] 
+ if self.integrationspace == "twotheta": + self.bin_edges = np.r_[ + 0, np.arange(self.tthstep / 2, self.tthmax, self.tthstep) + ] self.xgrid = np.degrees(self.bin_edges[1:] - self.tthstep / 2) self.tthorqmatrix = self.genTTHMatrix() - elif self.integrationspace == 'qspace': - self.bin_edges = np.r_[0, np.arange(self.qstep / 2, self.qmax, self.qstep)] + elif self.integrationspace == "qspace": + self.bin_edges = np.r_[ + 0, np.arange(self.qstep / 2, self.qmax, self.qstep) + ] self.xgrid = self.bin_edges[1:] - self.qstep / 2 self.tthorqmatrix = self.genQMatrix() return def genIntegrationInds(self, mask=None): - ''' + """ generate self.bin_number used in integration (number of pixels in on bin) - + :param mask: 2D array, mask of image, should have same dimension, 1 for masked pixel - + :return: self.bin_number - ''' + """ self.maskedmatrix = np.array(self.tthorqmatrix) if mask == None: # mask = np.zeros((self.ydimension, self.xdimension), dtype=bool) mask = np.zeros((len(self.yr), len(self.xr)), dtype=bool) ce = self.cropedges - mask = mask[ce[2]:-ce[3], ce[0]:-ce[1]] + mask = mask[ce[2] : -ce[3], ce[0] : -ce[1]] self.maskedmatrix[mask] = 1000.0 - + # extra crop maskedmatrix = self.getMaskedmatrixPic() # self.bin_number = np.array(np.histogram(maskedmatrix, self.bin_edges)[0], dtype=float) @@ -114,13 +124,13 @@ def genIntegrationInds(self, mask=None): return # self.bin_number def intensity(self, pic): - ''' + """ 2D to 1D image integration, intensity of pixels are binned and then take average, - + :param pic: 2D array, array of raw counts, corrections hould be already applied - + :retrun: 2d array, [tthorq, intensity, unceratinty] or [tthorq, intensity] - ''' + """ intensity = self.calculateIntensity(pic) if self.uncertaintyenable: @@ -129,75 +139,82 @@ def intensity(self, pic): else: rv = np.vstack([self.xgrid, intensity]) return rv - + def getMaskedmatrixPic(self, pic=None): - ''' + """ return the maskedmatrix and pic using self.extracrop and self.cropedges - + 
:param pic: 2d array, pic array, if None, then only return maskedmatrix - - :return: croped maskedmatrix and pic - ''' + + :return: croped maskedmatrix and pic + """ ec = self.extracrop ce = self.cropedges s = [ecx - cex if ecx > cex else 0 for ecx, cex in zip(ec, ce)] s[3] = -s[3] if s[3] != 0 else None s[1] = -s[1] if s[1] != 0 else None - rv = self.maskedmatrix[s[2]:s[3], s[0]:s[1]] - + rv = self.maskedmatrix[s[2] : s[3], s[0] : s[1]] + temps = np.array(s) if any(self.perviousmaskedmatrix != temps): - self.perviousmaskedmatrix = temps - self.bin_number = np.array(np.histogram(rv, self.bin_edges)[0], dtype=float) - self.bin_number[self.bin_number <= 0] = 1 - + self.perviousmaskedmatrix = temps + self.bin_number = np.array( + np.histogram(rv, self.bin_edges)[0], dtype=float + ) + self.bin_number[self.bin_number <= 0] = 1 + if pic != None: ps = [max(s1, s2) for s1, s2 in zip(ce, ec)] - rv = self.maskedmatrix[s[2]:s[3], s[0]:s[1]], pic[ps[2]:-ps[3], ps[0]:-ps[1]] + rv = ( + self.maskedmatrix[s[2] : s[3], s[0] : s[1]], + pic[ps[2] : -ps[3], ps[0] : -ps[1]], + ) return rv - + def calculateIntensity(self, pic): - ''' + """ calculate the 1D intensity - + :param pic: 2D array, array of raw counts, raw counts should be corrected - + :retrun: 1d array, 1D integrated intensity - ''' - + """ + maskedmatrix, pic = self.getMaskedmatrixPic(pic) - + intensity = np.histogram(maskedmatrix, self.bin_edges, weights=pic)[0] return intensity / self.bin_number def calculateVariance(self, pic): - ''' + """ calculate the 1D intensity - + :param pic: 2D array, array of raw counts, corrections hould be already applied - + :retrun: 1d array, variance of integrated intensity - ''' + """ maskedmatrix = self.getMaskedmatrixPic() - + picvar = self.calculateVarianceLocal(pic) - variance = np.histogram(maskedmatrix, self.bin_edges, weights=picvar)[0] + variance = np.histogram(maskedmatrix, self.bin_edges, weights=picvar)[ + 0 + ] return variance / self.bin_number def calculateVarianceLocal(self, 
pic): - ''' - calculate the variance of raw counts of each pixel are calculated according to their + """ + calculate the variance of raw counts of each pixel are calculated according to their loacl variance. - + :param pic: 2d array, 2d image array, corrections hould be already applied - + :return: 2d array, variance of each pixel - ''' + """ maskedmatrix, pic = self.getMaskedmatrixPic(pic) - - picavg = snf.uniform_filter(pic, 5, mode='wrap') + + picavg = snf.uniform_filter(pic, 5, mode="wrap") pics2 = (pic - picavg) ** 2 - pvar = snf.uniform_filter(pics2, 5, mode='wrap') + pvar = snf.uniform_filter(pics2, 5, mode="wrap") gain = pvar / pic inds = np.nonzero(np.logical_and(np.isnan(gain), np.isinf(gain))) @@ -207,11 +224,11 @@ def calculateVarianceLocal(self, pic): return var def genDistanceMatrix(self): - ''' + """ Calculate the distance matrix - + :return: 2d array, distance between source and each pixel - ''' + """ sinr = np.sin(-self.rotation) cosr = np.cos(-self.rotation) sint = np.sin(self.tilt) @@ -224,16 +241,16 @@ def genDistanceMatrix(self): dmatrix = np.zeros((len(self.yr), len(self.xr)), dtype=float) dmatrix += ((self.xr - sourcexr) ** 2).reshape(1, len(self.xr)) dmatrix += ((self.yr - sourceyr) ** 2).reshape(len(self.yr), 1) - dmatrix += sourcezr ** 2 + dmatrix += sourcezr**2 self.dmatrix = np.sqrt(dmatrix) return self.dmatrix def genTTHMatrix(self): - ''' - Calculate the diffraction angle matrix - + """ + Calculate the diffraction angle matrix + :return: 2d array, two theta angle (in radians) of each pixel's center - ''' + """ sinr = np.sin(-self.rotation) cosr = np.cos(-self.rotation) @@ -245,19 +262,23 @@ def genTTHMatrix(self): # tthmatrix1 = np.zeros((self.ydimension, self.xdimension), dtype=float) tthmatrix1 = np.zeros((len(self.yr), len(self.xr)), dtype=float) - tthmatrix1 += ((-self.xr + sourcexr) * sourcexr).reshape(1, len(self.xr)) - tthmatrix1 += ((-self.yr + sourceyr) * sourceyr).reshape(len(self.yr), 1) + tthmatrix1 += ((-self.xr + 
sourcexr) * sourcexr).reshape( + 1, len(self.xr) + ) + tthmatrix1 += ((-self.yr + sourceyr) * sourceyr).reshape( + len(self.yr), 1 + ) tthmatrix1 += sourcezr * sourcezr tthmatrix = np.arccos(tthmatrix1 / self.dmatrix / self.distance) self.tthmatrix = tthmatrix return tthmatrix def genQMatrix(self): - ''' - Calculate the q matrix - + """ + Calculate the q matrix + :return: 2d array, q value of each pixel's center - ''' + """ sinr = np.sin(-self.rotation) cosr = np.cos(-self.rotation) sint = np.sin(self.tilt) @@ -268,8 +289,12 @@ def genQMatrix(self): # tthmatrix1 = np.zeros((self.ydimension, self.xdimension), dtype=float) tthmatrix1 = np.zeros((len(self.yr), len(self.xr)), dtype=float) - tthmatrix1 += ((-self.xr + sourcexr) * sourcexr).reshape(1, len(self.xr)) - tthmatrix1 += ((-self.yr + sourceyr) * sourceyr).reshape(len(self.yr), 1) + tthmatrix1 += ((-self.xr + sourcexr) * sourcexr).reshape( + 1, len(self.xr) + ) + tthmatrix1 += ((-self.yr + sourceyr) * sourceyr).reshape( + len(self.yr), 1 + ) tthmatrix1 += sourcezr * sourcezr tthmatrix = np.arccos(tthmatrix1 / self.dmatrix / self.distance) self.tthmatrix = tthmatrix @@ -277,41 +302,46 @@ def genQMatrix(self): return Q def genCorrectionMatrix(self): - ''' + """ generate correction matrix. multiple the 2D raw counts array by this correction matrix to get corrected raw counts. It will calculate solid angle correction or polarization correction. - + :return: 2d array, correction matrix to apply on the image - ''' + """ rv = self._solidAngleCorrection() * self._polarizationCorrection() return rv def _solidAngleCorrection(self): - ''' + """ generate correction matrix of soild angle correction for 2D flat detector. 
- + :return: 2d array, correction matrix to apply on the image - ''' + """ if self.sacorrectionenable: sourcezr = self.distance * np.cos(self.tilt) - correction = (self.dmatrix / sourcezr) + correction = self.dmatrix / sourcezr else: correction = np.ones((len(self.yr), len(self.xr))) return correction def _polarizationCorrection(self): - ''' + """ generate correction matrix of polarization correction for powder diffraction for 2D flat detector. require the self.polcorrectf factor in configuration. :return: 2d array, correction matrix to apply on the image - ''' + """ if self.polcorrectionenable: # tthmatrix = self.tthorqmatrix if self.integrationspace == 'twotheta' else self.genTTHMatrix() tthmatrix = self.tthmatrix azimuthmatrix = self.azimuthmatrix p = 0.5 * (1 + (np.cos(tthmatrix)) ** 2) - p1 = 0.5 * self.polcorrectf * np.cos(2 * azimuthmatrix) * (np.sin(tthmatrix)) ** 2 + p1 = ( + 0.5 + * self.polcorrectf + * np.cos(2 * azimuthmatrix) + * (np.sin(tthmatrix)) ** 2 + ) p = 1.0 / (p - p1) else: # p = np.ones((self.ydimension, self.xdimension)) diff --git a/diffpy/srxplanar/loadimage.py b/diffpy/srxplanar/loadimage.py index 5a2c660..a305bf9 100644 --- a/diffpy/srxplanar/loadimage.py +++ b/diffpy/srxplanar/loadimage.py @@ -27,6 +27,7 @@ def openImage(im): rv = fabio.openimage.openimage(im) return rv.data + except: import tifffile @@ -36,31 +37,32 @@ def openImage(im): class LoadImage(object): - ''' - provide methods to filter files and load images - ''' + """ + provide methods to filter files and load images + """ + # define configuration properties that are forwarded to self.config - xdimension = _configPropertyR('xdimension') - ydimension = _configPropertyR('ydimension') - opendirectory = _configPropertyR('opendirectory') - filenames = _configPropertyR('filenames') - includepattern = _configPropertyR('includepattern') - excludepattern = _configPropertyR('excludepattern') - fliphorizontal = _configPropertyR('fliphorizontal') - flipvertical = 
_configPropertyR('flipvertical') + xdimension = _configPropertyR("xdimension") + ydimension = _configPropertyR("ydimension") + opendirectory = _configPropertyR("opendirectory") + filenames = _configPropertyR("filenames") + includepattern = _configPropertyR("includepattern") + excludepattern = _configPropertyR("excludepattern") + fliphorizontal = _configPropertyR("fliphorizontal") + flipvertical = _configPropertyR("flipvertical") def __init__(self, p): self.config = p return def flipImage(self, pic): - ''' + """ flip image if configured in config :param pic: 2d array, image array :return: 2d array, flipped image array - ''' + """ if self.fliphorizontal: pic = np.array(pic[:, ::-1]) if self.flipvertical: @@ -68,14 +70,14 @@ def flipImage(self, pic): return pic def loadImage(self, filename): - ''' + """ load image file, if failed (for example loading an incomplete file), then it will keep trying loading file for 5s :param filename: str, image file name :return: 2d ndarray, 2d image array (flipped) - ''' + """ if os.path.exists(filename): filenamefull = filename else: @@ -85,7 +87,7 @@ def loadImage(self, filename): i = 0 while i < 10: try: - if os.path.splitext(filenamefull)[-1] == '.npy': + if os.path.splitext(filenamefull)[-1] == ".npy": image = np.load(filenamefull) else: image = openImage(filenamefull) @@ -97,43 +99,62 @@ def loadImage(self, filename): image[image < 0] = 0 return image - def genFileList(self, filenames=None, opendir=None, includepattern=None, excludepattern=None, fullpath=False): - ''' + def genFileList( + self, + filenames=None, + opendir=None, + includepattern=None, + excludepattern=None, + fullpath=False, + ): + """ generate the list of file in opendir according to include/exclude pattern :param filenames: list of str, list of file name patterns, all files match ANY pattern in this list will be included :param opendir: str, the directory to get files - :param includepattern: list of str, list of wildcard of files that will be loaded, - all 
files match ALL patterns in this list will be included + :param includepattern: list of str, list of wildcard of files that will be loaded, + all files match ALL patterns in this list will be included :param excludepattern: list of str, list of wildcard of files that will be blocked, any files match ANY patterns in this list will be blocked :param fullpath: bool, if true, return the full path of each file :return: list of str, a list of filenames - ''' + """ fileset = self.genFileSet( - filenames, opendir, includepattern, excludepattern, fullpath) + filenames, opendir, includepattern, excludepattern, fullpath + ) return sorted(list(fileset)) - def genFileSet(self, filenames=None, opendir=None, includepattern=None, excludepattern=None, fullpath=False): - ''' + def genFileSet( + self, + filenames=None, + opendir=None, + includepattern=None, + excludepattern=None, + fullpath=False, + ): + """ generate the list of file in opendir according to include/exclude pattern :param filenames: list of str, list of file name patterns, all files match ANY pattern in this list will be included :param opendir: str, the directory to get files - :param includepattern: list of str, list of wildcard of files that will be loaded, - all files match ALL patterns in this list will be included + :param includepattern: list of str, list of wildcard of files that will be loaded, + all files match ALL patterns in this list will be included :param excludepattern: list of str, list of wildcard of files that will be blocked, any files match ANY patterns in this list will be blocked :param fullpath: bool, if true, return the full path of each file :return: set of str, a list of filenames - ''' + """ filenames = self.filenames if filenames == None else filenames opendir = self.opendirectory if opendir == None else opendir - includepattern = self.includepattern if includepattern == None else includepattern - excludepattern = self.excludepattern if excludepattern == None else excludepattern + 
includepattern = ( + self.includepattern if includepattern == None else includepattern + ) + excludepattern = ( + self.excludepattern if excludepattern == None else excludepattern + ) # filter the filenames according to include and exclude pattern filelist = os.listdir(opendir) fileset = set() @@ -149,6 +170,7 @@ def genFileSet(self, filenames=None, opendir=None, includepattern=None, excludep fileset = fileset1 if fullpath: filelist = map( - lambda x: os.path.abspath(os.path.join(opendir, x)), fileset) + lambda x: os.path.abspath(os.path.join(opendir, x)), fileset + ) fileset = set(filelist) return fileset diff --git a/diffpy/srxplanar/mask.py b/diffpy/srxplanar/mask.py index e36be3e..9fc088a 100644 --- a/diffpy/srxplanar/mask.py +++ b/diffpy/srxplanar/mask.py @@ -1,251 +1,281 @@ -#!/usr/bin/env python -############################################################################## -# -# diffpy.srxplanar by DANSE Diffraction group -# Simon J. L. Billinge -# (c) 2010 Trustees of the Columbia University -# in the City of New York. All rights reserved. -# -# File coded by: Xiaohao Yang -# -# See AUTHORS.txt for a list of people who contributed. -# See LICENSE.txt for license information. 
-# -############################################################################## - -import numpy as np -import scipy.sparse as ssp -try: - import fabio - def openImage(im): - rv = fabio.openimage.openimage(im) - return rv.data -except: - import tifffile - print 'Only tiff or .npy mask is support since fabio is not available' - def openImage(im): - try: - rv = tifffile.imread(im) - except: - rv = 0 - return rv - -import scipy.ndimage.filters as snf -import scipy.ndimage.morphology as snm -import os -from diffpy.srxplanar.srxplanarconfig import _configPropertyR - -class Mask(object): - ''' - provide methods for mask generation, including: - - static mask: tif mask, npy mask - dymanic mask: masking dark pixels, bright pixels - - ''' - - xdimension = _configPropertyR('xdimension') - ydimension = _configPropertyR('ydimension') - fliphorizontal = _configPropertyR('fliphorizontal') - flipvertical = _configPropertyR('flipvertical') - wavelength = _configPropertyR('wavelength') - maskfile = _configPropertyR('maskfile') - brightpixelmask = _configPropertyR('brightpixelmask') - darkpixelmask = _configPropertyR('darkpixelmask') - cropedges = _configPropertyR('cropedges') - avgmask = _configPropertyR('avgmask') - - def __init__(self, p, calculate): - self.config = p - self.staticmask = np.zeros((self.ydimension, self.xdimension)) - self.dynamicmask = None - self.calculate = calculate - return - - def staticMask(self, maskfile=None): - ''' - create a static mask according existing mask file. 
This mask remain unchanged for different images - - :param maskfile: string, file name of mask, - mask file supported: .npy, .tif file, ATTN: mask in .npy form should be already flipped, - and 1 (or larger) stands for masked pixels, 0(<0) stands for unmasked pixels - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - maskfile = self.maskfile if maskfile == None else maskfile - - if os.path.exists(maskfile): - if maskfile.endswith('.npy'): - rv = np.load(maskfile) - elif maskfile.endswith('.tif'): - immask = openImage(maskfile) - rv = self.flipImage(immask) - else: - rv = np.zeros((self.ydimension, self.xdimension)) - - self.staticmask = (rv > 0) - return self.staticmask - - def dynamicMask(self, pic, dymask=None, brightpixelmask=None, darkpixelmask=None, avgmask=None): - ''' - create a dynamic mask according to image array. This mask changes for different images - - :param pic: 2d array, image array to be processed - :parma dymask: 2d array, mask array used in average mask calculation - :param brightpixelmask: pixels with much lower intensity compare to adjacent pixels will be masked - :param darkpixelmask: pixels with much higher intensity compare to adjacent pixels will be masked - :param avgmask: Mask the pixels too bright or too dark compared to the average intensity at the similar diffraction angle - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - - brightpixelmask = self.brightpixelmask if brightpixelmask == None else brightpixelmask - darkpixelmask = self.darkpixelmask if darkpixelmask == None else darkpixelmask - avgmask = self.avgmask if avgmask == None else avgmask - - if darkpixelmask or brightpixelmask or avgmask: - rv = np.zeros((self.ydimension, self.xdimension)) - if darkpixelmask: - rv += self.darkPixelMask(pic) - if brightpixelmask: - rv += self.brightPixelMask(pic) - if avgmask: - rv += self.avgMask(pic, dymask=dymask) - self.dynamicmask = (rv > 0) - else: - self.dynamicmask = None - return self.dynamicmask - - def 
edgeMask(self, cropedges=None): - ''' - generate edge mask - - :param cropedges: crop the image, maske pixels around the image edge (left, right, - top, bottom), must larger than 0, if None, use self.corpedges - ''' - ce = self.cropedges if cropedges == None else cropedges - mask = np.ones((self.ydimension, self.xdimension), dtype=bool) - mask[ce[2]:-ce[3], ce[0]:-ce[1]] = 0 - return mask - - def avgMask(self, image, high=None, low=None, dymask=None, cropedges=None): - ''' - generate a mask that automatically mask the pixels, whose intensities are - too high or too low compare to the pixels which have similar twotheta value - - :param image: 2d array, image file (array) - :param high: float (default: 2.0), int > avgint * high will be masked - :param low: float (default: 0.5), int < avgint * low will be masked - :param dymask: 2d bool array, mask array used in calculation, True for masked pixel, - if None, then use self.staticmask - :param cropedges: crop the image, maske pixels around the image edge (left, right, - top, bottom), must larger than 0, if None, use self.config.corpedges - - :return 2d bool array, True for masked pixel, edgemake included, dymask not included - ''' - if dymask == None: - dymask = self.staticmask - high = self.config.avgmaskhigh if high == None else high - low = self.config.avgmasklow if low == None else low - - self.calculate.genIntegrationInds(dymask) - chi = self.calculate.intensity(image) - index = np.rint(self.calculate.tthorqmatrix / self.config.tthorqstep).astype(int) - index[index >= len(chi[1]) - 1] = len(chi[1]) - 1 - avgimage = chi[1][index.ravel()].reshape(index.shape) - mask = np.ones((self.ydimension, self.xdimension), dtype=bool) - ce = self.cropedges if cropedges == None else cropedges - mask[ce[2]:-ce[3], ce[0]:-ce[1]] = np.logical_or(image[ce[2]:-ce[3], ce[0]:-ce[1]] < avgimage * low, - image[ce[2]:-ce[3], ce[0]:-ce[1]] > avgimage * high) - return mask - - def darkPixelMask(self, pic, r=None): - ''' - pixels with much 
lower intensity compare to adjacent pixels will be masked - - :param pic: 2d array, image array to be processed - :param r: float, a threshold for masked pixels - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - r = self.config.darkpixelr if r == None else r # 0.1 - - avgpic = np.average(pic) - ks = np.ones((5, 5)) - ks1 = np.ones((7, 7)) - picb = snf.percentile_filter(pic, 5, 3) < avgpic * r - picb = snm.binary_dilation(picb, structure=ks) - picb = snm.binary_erosion(picb, structure=ks1) - return picb - - def brightPixelMask(self, pic, size=None, r=None): - ''' - pixels with much higher intensity compare to adjacent pixels will be masked, - this mask is used when there are some bright spots/pixels whose intensity is higher - than its neighbors but not too high. Only use this on a very good powder averaged - data. Otherwise it may mask wrong pixels. - - This mask has similar functions as 'selfcorr' function. However, this mask will only - consider pixels' local neighbors pixels and tend to mask more pixels. While 'selfcorr' - function compare one pixel to other pixels in same bin. - - :param pic: 2d array, image array to be processed - :param size: int, size of local testing area - :param r: float, a threshold for masked pixels - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - size = self.config.brightpixelsize if size == None else size # 5 - r = self.config.brightpixelr if r == None else r # 1.2 - - rank = snf.rank_filter(pic, -size, size) - ind = snm.binary_dilation(pic > rank * r, np.ones((3, 3))) - return ind - - def undersample(self, undersamplerate): - ''' - a special mask used for undesampling image. 
It will create a mask that - discard (total number*(1-undersamplerate)) pixels - :param undersamplerate: float, 0~1, ratio of pixels to keep - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - mask = np.random.rand(self.ydimension, self.xdimension) < undersamplerate - return mask - - def flipImage(self, pic): - ''' - flip image if configured in config - - :param pic: 2d array, image array - - :return: 2d array, flipped image array - ''' - if self.fliphorizontal: - pic = pic[:, ::-1] - if self.flipvertical: - pic = pic[::-1, :] - return pic - - def saveMask(self, filename, pic=None, addmask=None): - ''' - generate a mask according to the addmask and pic. save it to .npy. 1 stands for masked pixel - the mask has same order as the pic, which means if the pic is flipped, the mask is fliped - (when pic is loaded though loadimage, it is flipped) - - :param filename: str, filename of mask file to be save - :param pic: 2d array, image array - :param addmask: list of str, control which mask to generate - - :return: 2d array of boolean, 1 stands for masked pixel - ''' - if not hasattr(self, 'mask'): - self.normalMask(addmask) - if (not hasattr(self, 'dynamicmask')) and (pic != None): - self.dynamicMask(pic, addmask=addmask) - tmask = self.mask - if hasattr(self, 'dynamicmask'): - if self.dynamicmask != None: - tmask = np.logical_or(self.mask, self.dynamicmask) if pic != None else self.mask - np.save(filename, tmask) - return tmask +#!/usr/bin/env python +############################################################################## +# +# diffpy.srxplanar by DANSE Diffraction group +# Simon J. L. Billinge +# (c) 2010 Trustees of the Columbia University +# in the City of New York. All rights reserved. +# +# File coded by: Xiaohao Yang +# +# See AUTHORS.txt for a list of people who contributed. +# See LICENSE.txt for license information. 
+# +############################################################################## + +import numpy as np +import scipy.sparse as ssp + +try: + import fabio + + def openImage(im): + rv = fabio.openimage.openimage(im) + return rv.data + +except: + import tifffile + + print("Only tiff or .npy mask is support since fabio is not available") + + def openImage(im): + try: + rv = tifffile.imread(im) + except: + rv = 0 + return rv + + +import scipy.ndimage.filters as snf +import scipy.ndimage.morphology as snm +import os +from diffpy.srxplanar.srxplanarconfig import _configPropertyR + + +class Mask(object): + """ + provide methods for mask generation, including: + + static mask: tif mask, npy mask + dymanic mask: masking dark pixels, bright pixels + + """ + + xdimension = _configPropertyR("xdimension") + ydimension = _configPropertyR("ydimension") + fliphorizontal = _configPropertyR("fliphorizontal") + flipvertical = _configPropertyR("flipvertical") + wavelength = _configPropertyR("wavelength") + maskfile = _configPropertyR("maskfile") + brightpixelmask = _configPropertyR("brightpixelmask") + darkpixelmask = _configPropertyR("darkpixelmask") + cropedges = _configPropertyR("cropedges") + avgmask = _configPropertyR("avgmask") + + def __init__(self, p, calculate): + self.config = p + self.staticmask = np.zeros((self.ydimension, self.xdimension)) + self.dynamicmask = None + self.calculate = calculate + return + + def staticMask(self, maskfile=None): + """ + create a static mask according existing mask file. 
This mask remain unchanged for different images + + :param maskfile: string, file name of mask, + mask file supported: .npy, .tif file, ATTN: mask in .npy form should be already flipped, + and 1 (or larger) stands for masked pixels, 0(<0) stands for unmasked pixels + + :return: 2d array of boolean, 1 stands for masked pixel + """ + maskfile = self.maskfile if maskfile == None else maskfile + + if os.path.exists(maskfile): + if maskfile.endswith(".npy"): + rv = np.load(maskfile) + elif maskfile.endswith(".tif"): + immask = openImage(maskfile) + rv = self.flipImage(immask) + else: + rv = np.zeros((self.ydimension, self.xdimension)) + + self.staticmask = rv > 0 + return self.staticmask + + def dynamicMask( + self, + pic, + dymask=None, + brightpixelmask=None, + darkpixelmask=None, + avgmask=None, + ): + """ + create a dynamic mask according to image array. This mask changes for different images + + :param pic: 2d array, image array to be processed + :parma dymask: 2d array, mask array used in average mask calculation + :param brightpixelmask: pixels with much lower intensity compare to adjacent pixels will be masked + :param darkpixelmask: pixels with much higher intensity compare to adjacent pixels will be masked + :param avgmask: Mask the pixels too bright or too dark compared to the average intensity at the similar diffraction angle + + :return: 2d array of boolean, 1 stands for masked pixel + """ + + brightpixelmask = ( + self.brightpixelmask + if brightpixelmask == None + else brightpixelmask + ) + darkpixelmask = ( + self.darkpixelmask if darkpixelmask == None else darkpixelmask + ) + avgmask = self.avgmask if avgmask == None else avgmask + + if darkpixelmask or brightpixelmask or avgmask: + rv = np.zeros((self.ydimension, self.xdimension)) + if darkpixelmask: + rv += self.darkPixelMask(pic) + if brightpixelmask: + rv += self.brightPixelMask(pic) + if avgmask: + rv += self.avgMask(pic, dymask=dymask) + self.dynamicmask = rv > 0 + else: + self.dynamicmask = None 
+ return self.dynamicmask + + def edgeMask(self, cropedges=None): + """ + generate edge mask + + :param cropedges: crop the image, maske pixels around the image edge (left, right, + top, bottom), must larger than 0, if None, use self.corpedges + """ + ce = self.cropedges if cropedges == None else cropedges + mask = np.ones((self.ydimension, self.xdimension), dtype=bool) + mask[ce[2] : -ce[3], ce[0] : -ce[1]] = 0 + return mask + + def avgMask(self, image, high=None, low=None, dymask=None, cropedges=None): + """ + generate a mask that automatically mask the pixels, whose intensities are + too high or too low compare to the pixels which have similar twotheta value + + :param image: 2d array, image file (array) + :param high: float (default: 2.0), int > avgint * high will be masked + :param low: float (default: 0.5), int < avgint * low will be masked + :param dymask: 2d bool array, mask array used in calculation, True for masked pixel, + if None, then use self.staticmask + :param cropedges: crop the image, maske pixels around the image edge (left, right, + top, bottom), must larger than 0, if None, use self.config.corpedges + + :return 2d bool array, True for masked pixel, edgemake included, dymask not included + """ + if dymask == None: + dymask = self.staticmask + high = self.config.avgmaskhigh if high == None else high + low = self.config.avgmasklow if low == None else low + + self.calculate.genIntegrationInds(dymask) + chi = self.calculate.intensity(image) + index = np.rint( + self.calculate.tthorqmatrix / self.config.tthorqstep + ).astype(int) + index[index >= len(chi[1]) - 1] = len(chi[1]) - 1 + avgimage = chi[1][index.ravel()].reshape(index.shape) + mask = np.ones((self.ydimension, self.xdimension), dtype=bool) + ce = self.cropedges if cropedges == None else cropedges + mask[ce[2] : -ce[3], ce[0] : -ce[1]] = np.logical_or( + image[ce[2] : -ce[3], ce[0] : -ce[1]] < avgimage * low, + image[ce[2] : -ce[3], ce[0] : -ce[1]] > avgimage * high, + ) + return mask + + 
def darkPixelMask(self, pic, r=None): + """ + pixels with much lower intensity compare to adjacent pixels will be masked + + :param pic: 2d array, image array to be processed + :param r: float, a threshold for masked pixels + + :return: 2d array of boolean, 1 stands for masked pixel + """ + r = self.config.darkpixelr if r == None else r # 0.1 + + avgpic = np.average(pic) + ks = np.ones((5, 5)) + ks1 = np.ones((7, 7)) + picb = snf.percentile_filter(pic, 5, 3) < avgpic * r + picb = snm.binary_dilation(picb, structure=ks) + picb = snm.binary_erosion(picb, structure=ks1) + return picb + + def brightPixelMask(self, pic, size=None, r=None): + """ + pixels with much higher intensity compare to adjacent pixels will be masked, + this mask is used when there are some bright spots/pixels whose intensity is higher + than its neighbors but not too high. Only use this on a very good powder averaged + data. Otherwise it may mask wrong pixels. + + This mask has similar functions as 'selfcorr' function. However, this mask will only + consider pixels' local neighbors pixels and tend to mask more pixels. While 'selfcorr' + function compare one pixel to other pixels in same bin. + + :param pic: 2d array, image array to be processed + :param size: int, size of local testing area + :param r: float, a threshold for masked pixels + + :return: 2d array of boolean, 1 stands for masked pixel + """ + size = self.config.brightpixelsize if size == None else size # 5 + r = self.config.brightpixelr if r == None else r # 1.2 + + rank = snf.rank_filter(pic, -size, size) + ind = snm.binary_dilation(pic > rank * r, np.ones((3, 3))) + return ind + + def undersample(self, undersamplerate): + """ + a special mask used for undesampling image. 
It will create a mask that + discard (total number*(1-undersamplerate)) pixels + :param undersamplerate: float, 0~1, ratio of pixels to keep + + :return: 2d array of boolean, 1 stands for masked pixel + """ + mask = ( + np.random.rand(self.ydimension, self.xdimension) < undersamplerate + ) + return mask + + def flipImage(self, pic): + """ + flip image if configured in config + + :param pic: 2d array, image array + + :return: 2d array, flipped image array + """ + if self.fliphorizontal: + pic = pic[:, ::-1] + if self.flipvertical: + pic = pic[::-1, :] + return pic + + def saveMask(self, filename, pic=None, addmask=None): + """ + generate a mask according to the addmask and pic. save it to .npy. 1 stands for masked pixel + the mask has same order as the pic, which means if the pic is flipped, the mask is fliped + (when pic is loaded though loadimage, it is flipped) + + :param filename: str, filename of mask file to be save + :param pic: 2d array, image array + :param addmask: list of str, control which mask to generate + + :return: 2d array of boolean, 1 stands for masked pixel + """ + if not hasattr(self, "mask"): + self.normalMask(addmask) + if (not hasattr(self, "dynamicmask")) and (pic != None): + self.dynamicMask(pic, addmask=addmask) + tmask = self.mask + if hasattr(self, "dynamicmask"): + if self.dynamicmask != None: + tmask = ( + np.logical_or(self.mask, self.dynamicmask) + if pic != None + else self.mask + ) + np.save(filename, tmask) + return tmask diff --git a/diffpy/srxplanar/saveresults.py b/diffpy/srxplanar/saveresults.py index 80be1ae..9595fdd 100644 --- a/diffpy/srxplanar/saveresults.py +++ b/diffpy/srxplanar/saveresults.py @@ -18,14 +18,16 @@ import os from diffpy.srxplanar.srxplanarconfig import _configPropertyR + class SaveResults(object): - ''' - save results into files - ''' - integrationspace = _configPropertyR('integrationspace') - savedirectory = _configPropertyR('savedirectory') - gsasoutput = _configPropertyR('gsasoutput') - filenameplus = 
_configPropertyR('filenameplus') + """ + save results into files + """ + + integrationspace = _configPropertyR("integrationspace") + savedirectory = _configPropertyR("savedirectory") + gsasoutput = _configPropertyR("gsasoutput") + filenameplus = _configPropertyR("filenameplus") def __init__(self, p): self.config = p @@ -34,73 +36,84 @@ def __init__(self, p): def prepareCalculation(self): if not os.path.exists(self.savedirectory): - os.makedirs(self.savedirectory) + os.makedirs(self.savedirectory) return def getFilePathWithoutExt(self, filename): - ''' + """ get the normalized full path of filename with out extension - + :param filename: string, could be full path or file name only and with/without ext, only the base part of filename is used. - + :return: string, full normalized path of file without extension - ''' + """ filebase = os.path.splitext(os.path.split(filename)[1])[0] - if self.filenameplus != '' and self.filenameplus != None: - filenamep = '_'.join([filebase, self.filenameplus, self.integrationspace]) + if self.filenameplus != "" and self.filenameplus != None: + filenamep = "_".join( + [filebase, self.filenameplus, self.integrationspace] + ) else: - filenamep = '_'.join([filebase, self.integrationspace]) + filenamep = "_".join([filebase, self.integrationspace]) filepathwithoutext = os.path.join(self.savedirectory, filenamep) return filepathwithoutext def save(self, rv): - ''' + """ save diffraction intensity in .chi and gsas format(optional) - + :param rv: dict, result include integrated diffration intensity the rv['chi'] should be a 2d array with shape (2,len of intensity) or (3, len of intensity) file name is generated according to orginal file name and savedirectory - ''' - rv = self.saveChi(rv['chi'], rv['filename']) + """ + rv = self.saveChi(rv["chi"], rv["filename"]) if self.gsasoutput: - if self.gsasoutput in set(['std', 'esd', 'fxye']): - rv = [rv, self.saveGSAS(rv['chi'], rv['filename'])] + if self.gsasoutput in set(["std", "esd", "fxye"]): + rv 
= [rv, self.saveGSAS(rv["chi"], rv["filename"])] return rv def saveChi(self, xrd, filename): - ''' + """ save diffraction intensity in .chi - + :param xrd: 2d array with shape (2,len of intensity) or (3, len of intensity), [tthorq, intensity, (unceratinty)] - :param filename: str, base file name - ''' - filepath = self.getFilePathWithoutExt(filename) + '.chi' - f = open(filepath, 'wb') - f.write(self.config.getHeader(mode='short')) - f.write('#### start data\n') - np.savetxt(f, xrd.transpose(), fmt='%g') + :param filename: str, base file name + """ + filepath = self.getFilePathWithoutExt(filename) + ".chi" + f = open(filepath, "wb") + f.write(self.config.getHeader(mode="short")) + f.write("#### start data\n") + np.savetxt(f, xrd.transpose(), fmt="%g") f.close() return filepath def saveGSAS(self, xrd, filename): - ''' + """ save diffraction intensity in gsas format - + :param xrd: 2d array with shape (2,len of intensity) or (3, len of intensity), [tthorq, intensity, (unceratinty)] :param filename: str, base file name - ''' - filepath = self.getFilePathWithoutExt(filename) + '.gsas' - f = open(filepath, 'wb') - f.write(self.config.getHeader(mode='short')) - f.write('#### start data\n') + """ + filepath = self.getFilePathWithoutExt(filename) + ".gsas" + f = open(filepath, "wb") + f.write(self.config.getHeader(mode="short")) + f.write("#### start data\n") if xrd.shape[0] == 3: - s = writeGSASStr(os.path.splitext(path)[0], self.gsasoutput, xrd[0], xrd[1], xrd[2]) + s = writeGSASStr( + os.path.splitext(path)[0], + self.gsasoutput, + xrd[0], + xrd[1], + xrd[2], + ) elif xrd.shape[0] == 2: - s = writeGSASStr(os.path.splitext(path)[0], self.gsasoutput, xrd[0], xrd[1]) + s = writeGSASStr( + os.path.splitext(path)[0], self.gsasoutput, xrd[0], xrd[1] + ) f.write(s) f.close() return filepath + def writeGSASStr(name, mode, tth, iobs, esd=None): """ Return string of integrated intensities in GSAS format. 
@@ -108,7 +121,7 @@ def writeGSASStr(name, mode, tth, iobs, esd=None): :param tth: ndarray, two theta angle :param iobs: ndarray, Xrd intensity :param esd: ndarray, optional error value of intensity - + :return: string, a string to be saved to file """ maxintensity = 999999 @@ -116,39 +129,65 @@ def writeGSASStr(name, mode, tth, iobs, esd=None): logscale = min(logscale, 0) scale = 10 ** int(logscale) lines = [] - ltitle = 'Angular Profile' - ltitle += ': %s' % name - ltitle += ' scale=%g' % scale - if len(ltitle) > 80: ltitle = ltitle[:80] + ltitle = "Angular Profile" + ltitle += ": %s" % name + ltitle += " scale=%g" % scale + if len(ltitle) > 80: + ltitle = ltitle[:80] lines.append("%-80s" % ltitle) ibank = 1 nchan = len(iobs) # two-theta0 and dtwo-theta in centidegrees tth0_cdg = tth[0] * 100 dtth_cdg = (tth[-1] - tth[0]) / (len(tth) - 1) * 100 - if esd == None: mode = 'std' - if mode == 'std': + if esd == None: + mode = "std" + if mode == "std": nrec = int(numpy.ceil(nchan / 10.0)) - lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" % \ - (ibank, nchan, nrec, tth0_cdg, dtth_cdg, 0, 0) + lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" % ( + ibank, + nchan, + nrec, + tth0_cdg, + dtth_cdg, + 0, + 0, + ) lines.append("%-80s" % lbank) - lrecs = [ "%2i%6.0f" % (1, ii * scale) for ii in iobs ] + lrecs = ["%2i%6.0f" % (1, ii * scale) for ii in iobs] for i in range(0, len(lrecs), 10): - lines.append("".join(lrecs[i:i + 10])) - if mode == 'esd': + lines.append("".join(lrecs[i : i + 10])) + if mode == "esd": nrec = int(numpy.ceil(nchan / 5.0)) - lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD" % \ - (ibank, nchan, nrec, tth0_cdg, dtth_cdg, 0, 0) + lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD" % ( + ibank, + nchan, + nrec, + tth0_cdg, + dtth_cdg, + 0, + 0, + ) lines.append("%-80s" % lbank) - lrecs = [ "%8.0f%8.0f" % (ii, ee * scale) for ii, ee in zip(iobs, esd) ] + lrecs = ["%8.0f%8.0f" % (ii, ee * scale) for ii, ee in 
zip(iobs, esd)] for i in range(0, len(lrecs), 5): - lines.append("".join(lrecs[i:i + 5])) - if mode == 'fxye': + lines.append("".join(lrecs[i : i + 5])) + if mode == "fxye": nrec = nchan - lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" % \ - (ibank, nchan, nrec, tth0_cdg, dtth_cdg, 0, 0) + lbank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" % ( + ibank, + nchan, + nrec, + tth0_cdg, + dtth_cdg, + 0, + 0, + ) lines.append("%-80s" % lbank) - lrecs = [ "%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale) for xx, yy, ee in zip(tth, iobs, esd) ] + lrecs = [ + "%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale) + for xx, yy, ee in zip(tth, iobs, esd) + ] for i in range(len(lrecs)): lines.append("%-80s" % lrecs[i]) lines[-1] = "%-80s" % lines[-1] diff --git a/diffpy/srxplanar/selfcalibrate.py b/diffpy/srxplanar/selfcalibrate.py index 8d4cca6..8eab721 100644 --- a/diffpy/srxplanar/selfcalibrate.py +++ b/diffpy/srxplanar/selfcalibrate.py @@ -2,28 +2,46 @@ import scipy as sp import os from functools import partial -from scipy.optimize import minimize, leastsq, fmin_bfgs, fmin_l_bfgs_b, fmin_tnc, minimize_scalar, fmin_powell, \ - fmin_cg, fmin_slsqp, brent, golden +from scipy.optimize import ( + minimize, + leastsq, + fmin_bfgs, + fmin_l_bfgs_b, + fmin_tnc, + minimize_scalar, + fmin_powell, + fmin_cg, + fmin_slsqp, + brent, + golden, +) from matplotlib import rcParams -rcParams['backend'] = 'Qt4Agg' + +rcParams["backend"] = "Qt4Agg" try: import PySide - rcParams['backend.qt4'] = 'PySide' + + rcParams["backend.qt4"] = "PySide" import matplotlib.pyplot as plt + mplenabled = True except: - try: + try: import PyQt4 import matplotlib.pyplot as plt + mplenabled = True except: mplenabled = False -def halfcut(p, srx, image, xycenter, qind=[50, 500], show=False, mode='x', output=0): - ''' - cut the image into two half, integrate them and compare the results, if the calibration + +def halfcut( + p, srx, image, xycenter, qind=[50, 500], 
show=False, mode="x", output=0 +): + """ + cut the image into two half, integrate them and compare the results, if the calibration information is correct, two half should give same results. - + :param p: calibration parameters :param srx: SrXplanar object, object to do the integration :param image: str or 2d array, image to be calibrated @@ -33,104 +51,109 @@ def halfcut(p, srx, image, xycenter, qind=[50, 500], show=False, mode='x', outpu :param mode: str, mode of calibration, could be x, y, tilt, rotation, all, xy :param output: int, 0 to return one number (sum of square of difference), 1 to return the difference array - + :return: sum of square of difference or difference array - ''' - if mode == 'x': + """ + if mode == "x": srx.updateConfig(xbeamcenter=p) - elif mode == 'y': + elif mode == "y": srx.updateConfig(ybeamcenter=p) - elif mode == 'tilt': + elif mode == "tilt": srx.updateConfig(tiltd=p) - elif mode == 'rotation': + elif mode == "rotation": srx.updateConfig(rotationd=p) - elif mode == 'all': - srx.updateConfig(xbeamcenter=p[0], - ybeamcenter=p[1], - rotationd=p[2], - tiltd=p[3]) - elif mode == 'xy': - srx.updateConfig(xbeamcenter=p[0], - ybeamcenter=p[1]) - elif mode == 'show': + elif mode == "all": + srx.updateConfig( + xbeamcenter=p[0], ybeamcenter=p[1], rotationd=p[2], tiltd=p[3] + ) + elif mode == "xy": + srx.updateConfig(xbeamcenter=p[0], ybeamcenter=p[1]) + elif mode == "show": pass - + srx.prepareCalculation() - kwargs = {'savename':None, - 'savefile':False, - 'flip':False, - 'correction':False, - } - if mode != 'y': + kwargs = { + "savename": None, + "savefile": False, + "flip": False, + "correction": False, + } + if mode != "y": srx.config.extracrop = [1, srx.config.xdimension - xycenter[0], 1, 1] res1 = srx.integrate(image, **kwargs) - chi1 = res1['chi'][1][qind[0]:qind[1]] - + chi1 = res1["chi"][1][qind[0] : qind[1]] + srx.config.extracrop = [xycenter[0], 1, 1, 1] res2 = srx.integrate(image, **kwargs) - chi2 = res2['chi'][1][qind[0]:qind[1]] 
- - if mode != 'x': + chi2 = res2["chi"][1][qind[0] : qind[1]] + + if mode != "x": srx.config.extracrop = [1, 1, 1, srx.config.ydimension - xycenter[1]] res3 = srx.integrate(image, **kwargs) - chi3 = res3['chi'][1][qind[0]:qind[1]] - + chi3 = res3["chi"][1][qind[0] : qind[1]] + srx.config.extracrop = [1, 1, xycenter[1], 1] res4 = srx.integrate(image, **kwargs) - chi4 = res4['chi'][1][qind[0]:qind[1]] - - if mode == 'x': + chi4 = res4["chi"][1][qind[0] : qind[1]] + + if mode == "x": rv = chi1 - chi2 rv = rv / (chi1 + chi2).mean() - elif mode == 'y': + elif mode == "y": rv = chi3 - chi4 rv = rv / (chi3 + chi4).mean() else: r1 = chi1 - chi2 r2 = chi3 - chi4 - rv = np.concatenate([r1 / (chi1 + chi2).mean(), r2 / (chi3 + chi4).mean()]) - - rv0 = np.sum(rv ** 2) - print p - print rv0 + rv = np.concatenate( + [r1 / (chi1 + chi2).mean(), r2 / (chi3 + chi4).mean()] + ) + + rv0 = np.sum(rv**2) + print(p) + print(rv0) if output == 0: - rv = rv0 + rv = rv0 if show and mplenabled: - print p - print rv + print(p) + print(rv) plotRes(mode, res1, res2, res3, res4) return rv + def plotRes(mode, res1, res2, res3, res4): - ''' + """ plot results - ''' + """ plt.ion() plt.figure(1) plt.clf() - if mode != 'y': - plt.plot(res1['chi'][0], res1['chi'][1], label='left') - plt.plot(res2['chi'][0], res2['chi'][1], label='right') - if mode != 'x': - plt.plot(res3['chi'][0], res3['chi'][1], label='up') - plt.plot(res4['chi'][0], res4['chi'][1], label='down') + if mode != "y": + plt.plot(res1["chi"][0], res1["chi"][1], label="left") + plt.plot(res2["chi"][0], res2["chi"][1], label="right") + if mode != "x": + plt.plot(res3["chi"][0], res3["chi"][1], label="up") + plt.plot(res4["chi"][0], res4["chi"][1], label="down") plt.legend() plt.show() return + def minimize1(func, bounds): - ''' + """ 1d minimizer - + :param func: callable function f(x), 1d function :param bounds: (float, float), the initial bounds - + :return: float, the value of x - ''' + """ diffb = np.abs(bounds[1] - bounds[0]) if 
diffb > 6: - trylist = np.linspace(bounds[0], bounds[1], 3 * int(bounds[1] - bounds[0]) + 1, True) + trylist = np.linspace( + bounds[0], bounds[1], 3 * int(bounds[1] - bounds[0]) + 1, True + ) else: trylist = np.linspace(bounds[0], bounds[1], 21, True) vlow = np.inf @@ -144,21 +167,31 @@ def minimize1(func, bounds): trylist = np.linspace(rv - 0.5, rv + 0.5, 21, True) else: trylist = np.linspace(rv - diffb / 12.0, rv + diffb / 12.0, 21, True) - + for v in trylist: temp = func(v) if temp < vlow: rv = v vlow = temp - return rv - -def selfCalibrateX(srx, image, xycenter=None, mode='all', output=0, showresults=False, qrange=[None, None], **kwargs): - ''' + return rv + + +def selfCalibrateX( + srx, + image, + xycenter=None, + mode="all", + output=0, + showresults=False, + qrange=[None, None], + **kwargs +): + """ Do the self calibration using mode X - - the initial value is read from the current value of srx object, and the + + the initial value is read from the current value of srx object, and the refined results will be writrn into the srx object - + :param srx: SrXplanar object, object to do the integration :param image: str or 2d array, image to be calibrated :param xycenter: [int, int], cut position, if None, determine it using current beam center @@ -166,100 +199,172 @@ def selfCalibrateX(srx, image, xycenter=None, mode='all', output=0, showresults= :param output: int, 0 to use fmin optimizer, 1 to use leastsq optimizer :param showresults: bool, plot the halfcut result :param qrange: q range used in calculating difference - + :return: list, refined parameter - ''' + """ bak = {} - for opt in ['uncertaintyenable', 'integrationspace', 'qmax', 'qstep', - 'cropedges', 'extracrop', 'brightpixelmask', 'darkpixelmask', 'avgmask']: + for opt in [ + "uncertaintyenable", + "integrationspace", + "qmax", + "qstep", + "cropedges", + "extracrop", + "brightpixelmask", + "darkpixelmask", + "avgmask", + ]: bak[opt] = getattr(srx.config, opt) - - xycenter = 
[int(srx.config.xbeamcenter), - int(srx.config.ybeamcenter)] - + + xycenter = [int(srx.config.xbeamcenter), int(srx.config.ybeamcenter)] + qmax = srx.config.qmax # qstep = qmax / 2000 qstep = qmax / srx.config.xdimension - - srx.updateConfig(uncertaintyenable=False, - integrationspace='qspace', - # qmax=qmax, - qstep=qstep, - brightpixelmask=False, - darkpixelmask=False, - avgmask=False) + + srx.updateConfig( + uncertaintyenable=False, + integrationspace="qspace", + # qmax=qmax, + qstep=qstep, + brightpixelmask=False, + darkpixelmask=False, + avgmask=False, + ) # qind = [50, 1000] qind = [None, None] - qind[0] = int(qrange[0] / qstep) if qrange[0] != None else srx.config.xdimension / 20 - qind[0] = 0 if qind[0] < 0 else qind[0] - qind[1] = int(qrange[1] / qstep) if qrange[1] != None else srx.config.xdimension / 2 - qind[1] = srx.config.xdimension - 5 if qind[1] > srx.config.xdimension - 5 else qind[1] - + qind[0] = ( + int(qrange[0] / qstep) + if qrange[0] != None + else srx.config.xdimension / 20 + ) + qind[0] = 0 if qind[0] < 0 else qind[0] + qind[1] = ( + int(qrange[1] / qstep) + if qrange[1] != None + else srx.config.xdimension / 2 + ) + qind[1] = ( + srx.config.xdimension - 5 + if qind[1] > srx.config.xdimension - 5 + else qind[1] + ) + srx.prepareCalculation() srxconfig = srx.config image = np.array(srx._getPic(image)) - - func = partial(halfcut, srx=srx, image=image, qind=qind, mode=mode, output=output, - xycenter=xycenter, show=False) - - xywidth = 6 if not kwargs.has_key('xywidth') else kwargs['xywidth'] - if mode == 'x': + + func = partial( + halfcut, + srx=srx, + image=image, + qind=qind, + mode=mode, + output=output, + xycenter=xycenter, + show=False, + ) + + xywidth = 6 if not kwargs.has_key("xywidth") else kwargs["xywidth"] + if mode == "x": p0 = [srxconfig.xbeamcenter] bounds = (p0[0] - xywidth, p0[0] + xywidth) - elif mode == 'y': + elif mode == "y": p0 = [srxconfig.ybeamcenter] bounds = (p0[0] - xywidth, p0[0] + xywidth) - elif mode == 'tilt': + 
elif mode == "tilt": p0 = [srxconfig.tiltd] bounds = (p0[0] - 5, p0[0] + 5) - elif mode == 'rotation': + elif mode == "rotation": p0 = [srxconfig.rotationd] bounds = (0, 360) - elif mode == 'all': - p0 = [srxconfig.xbeamcenter, srxconfig.ybeamcenter, srxconfig.rotationd, srxconfig.tiltd] - bounds = [[p0[0] - xywidth, p0[0] + xywidth], [p0[1] - xywidth, p0[1] + xywidth], - [0, 360], [srxconfig.tiltd - 10, srxconfig.tiltd + 10]] - elif mode == 'xy': + elif mode == "all": + p0 = [ + srxconfig.xbeamcenter, + srxconfig.ybeamcenter, + srxconfig.rotationd, + srxconfig.tiltd, + ] + bounds = [ + [p0[0] - xywidth, p0[0] + xywidth], + [p0[1] - xywidth, p0[1] + xywidth], + [0, 360], + [srxconfig.tiltd - 10, srxconfig.tiltd + 10], + ] + elif mode == "xy": p0 = [srxconfig.xbeamcenter, srxconfig.ybeamcenter] - bounds = [[p0[0] - xywidth, p0[0] + xywidth], [p0[1] - xywidth, p0[1] + xywidth]] - + bounds = [ + [p0[0] - xywidth, p0[0] + xywidth], + [p0[1] - xywidth, p0[1] + xywidth], + ] + if output == 0: - if mode in ['x', 'y', 'tilt', 'rotation']: + if mode in ["x", "y", "tilt", "rotation"]: rv = minimize1(func, bounds) p = [rv] else: - rv = minimize(func, p0, method='Powell', bounds=bounds, options={'xtol':0.001, 'ftol':0.001}) + rv = minimize( + func, + p0, + method="Powell", + bounds=bounds, + options={"xtol": 0.001, "ftol": 0.001}, + ) p = rv.x else: rv = leastsq(func, p0, epsfcn=0.001) p = rv[0] - - print p - if mode == 'x': + + print(p) + if mode == "x": srx.updateConfig(xbeamcenter=p[0], **bak) prv = p[0] - elif mode == 'y': + elif mode == "y": srx.updateConfig(ybeamcenter=p[0], **bak) - elif mode == 'tilt': - srx.updateConfig(tiltd=p[0], ** bak) - elif mode == 'rotation': - srx.updateConfig(rotation=p[0], ** bak) - elif mode == 'xy': - srx.updateConfig(xbeamcenter=p[0], ybeamcenter=p[1], ** bak) - elif mode == 'all': - srx.updateConfig(xbeamcenter=p[0], ybeamcenter=p[1], rotationd=p[2], tiltd=p[3], ** bak) - + elif mode == "tilt": + srx.updateConfig(tiltd=p[0], **bak) + 
elif mode == "rotation": + srx.updateConfig(rotation=p[0], **bak) + elif mode == "xy": + srx.updateConfig(xbeamcenter=p[0], ybeamcenter=p[1], **bak) + elif mode == "all": + srx.updateConfig( + xbeamcenter=p[0], + ybeamcenter=p[1], + rotationd=p[2], + tiltd=p[3], + **bak + ) + if showresults: - halfcut([], srx=srx, image=image, xycenter=xycenter, qind=qind, show=True, mode='show', output=output) + halfcut( + [], + srx=srx, + image=image, + xycenter=xycenter, + qind=qind, + show=True, + mode="show", + output=output, + ) return p -def selfCalibrate(srx, image, mode='xy', cropedges='auto', showresults=False, qrange=[None, None], **kwargs): - ''' + +def selfCalibrate( + srx, + image, + mode="xy", + cropedges="auto", + showresults=False, + qrange=[None, None], + **kwargs +): + """ Do the self calibration - - the initial value is read from the current value of srx object, and the + + the initial value is read from the current value of srx object, and the refined results will be writrn into the srx object - + :param srx: SrXplanar object, object to do the integration :param image: str or 2d array, image to be calibrated :param mode: str or list of str: @@ -274,37 +379,49 @@ def selfCalibrate(srx, image, mode='xy', cropedges='auto', showresults=False, qr if 'all', then use all pixels :param showresults: bool, plot the halfcut result :param qrange: q range used in calculating difference - + :return: list, refined parameter - ''' - + """ + # lineCalibrate(srx, image) - + p = [] if isinstance(mode, str): xc = srx.config.xbeamcenter yc = srx.config.ybeamcenter xd = srx.config.xdimension yd = srx.config.ydimension - + if not isinstance(cropedges, (list, tuple)): - if cropedges == 'y' or (cropedges == 'auto' and mode == 'y'): + if cropedges == "y" or (cropedges == "auto" and mode == "y"): ce = [int(xc - 50), int(xd - xc - 50), yd / 100, yd / 100] - elif cropedges == 'x' or (cropedges == 'auto' and mode == 'x'): + elif cropedges == "x" or (cropedges == "auto" and mode == "x"): ce 
= [xd / 100, xd / 100, int(yc - 50), int(yd - yc - 50)] - elif cropedges == 'box' or (cropedges == 'auto' and (not mode in ['x', 'y'])): - ce = [int(xc - xd / 6), int(xd - xc - xd / 6), - int(yc - yd / 6), int(yd - yc - yd / 6)] + elif cropedges == "box" or ( + cropedges == "auto" and (not mode in ["x", "y"]) + ): + ce = [ + int(xc - xd / 6), + int(xd - xc - xd / 6), + int(yc - yd / 6), + int(yd - yc - yd / 6), + ] else: ce = [10, 10, 10, 10] - + cebak = srx.config.cropedges srx.updateConfig(cropedges=ce) - p = selfCalibrateX(srx, image, mode=mode, showresults=showresults, qrange=qrange, **kwargs) + p = selfCalibrateX( + srx, + image, + mode=mode, + showresults=showresults, + qrange=qrange, + **kwargs + ) srx.updateConfig(cropedges=cebak) - + elif isinstance(mode, (list, tuple)): for m in mode: p = selfCalibrate(srx, image, m, cropedges, qrange=qrange) return p - diff --git a/diffpy/srxplanar/srxplanar.py b/diffpy/srxplanar/srxplanar.py index 1ac8386..55d2870 100644 --- a/diffpy/srxplanar/srxplanar.py +++ b/diffpy/srxplanar/srxplanar.py @@ -13,13 +13,14 @@ # ############################################################################## -''' +""" srxplanar main modular -''' +""" import numpy as np import scipy.sparse as ssp import os, sys + # import time from diffpy.srxplanar.srxplanarconfig import SrXplanarConfig @@ -28,27 +29,32 @@ from diffpy.srxplanar.mask import Mask from diffpy.srxplanar.saveresults import SaveResults + class SrXplanar(object): - ''' + """ main modular for srxplanar - ''' + """ - def __init__(self, srxplanarconfig=None, configfile=None, args=None, **kwargs): - ''' + def __init__( + self, srxplanarconfig=None, configfile=None, args=None, **kwargs + ): + """ init srxplanar form a SrXplanarConfig instance, or config file, or args passed from cmd - or kwargs. If both SrXplanarConfig instance and other configfile/args/kwargs is specified, + or kwargs. 
If both SrXplanarConfig instance and other configfile/args/kwargs is specified, it will first init from config instance then update using configfile/args/kwargs - + :param srxplanarconfig: SrXplanarConfig, init srxplanar from a config instance :param configfile: string, name of config file :param args: list of str, usually be sys.argv :param kwargs: you can use like 'xbeamcenter=1024' or a dict to update the value of xbeamcenter - ''' + """ if srxplanarconfig != None: self.config = srxplanarconfig self.config.updateConfig(filename=configfile, args=args, **kwargs) else: - self.config = SrXplanarConfig(filename=configfile, args=args, **kwargs) + self.config = SrXplanarConfig( + filename=configfile, args=args, **kwargs + ) # init modulars self.loadimage = LoadImage(self.config) self.calculate = Calculate(self.config) @@ -57,15 +63,15 @@ def __init__(self, srxplanarconfig=None, configfile=None, args=None, **kwargs): return def updateConfig(self, filename=None, args=None, **kwargs): - ''' + """ update config using configfile/args/kwargs, then rerun all prepareCalculation() - + :param configfile: string, name of config file :param args: list of str, usually be sys.argv :param kwargs: you can use like 'xbeamcenter=1024' or a dict to update the value of xbeamcenter - + :return: None - ''' + """ self.config.updateConfig(filename=filename, args=args, **kwargs) # update instances self.calculate.prepareCalculation() @@ -73,28 +79,28 @@ def updateConfig(self, filename=None, args=None, **kwargs): return def prepareCalculation(self, pic=None): - ''' + """ prepare data used in calculation - - :param pic: str, list of str, or 2d array, if provided, and automask is True, then + + :param pic: str, list of str, or 2d array, if provided, and automask is True, then generate a dynamic mask - + :return: None - ''' + """ self.staticmask = self.mask.staticMask() self.correction = self.calculate.genCorrectionMatrix() self.staticmask = np.logical_or(self.mask.edgeMask(), self.staticmask) 
self.calculate.genIntegrationInds(self.staticmask) return - + def _picChanged(self, extramask=None): - ''' + """ update all pic related data (such as dynamic mask) when a new image is read - + :param extramask: 2d array, extra mask applied in integration - + :return: None - ''' + """ dynamicmask = self.mask.dynamicMask(self.pic, dymask=self.staticmask) if dynamicmask != None: @@ -111,16 +117,16 @@ def _picChanged(self, extramask=None): return def _getSaveFileName(self, imagename=None, filename=None): - ''' + """ get the save file name, the priority order is self.output> filename> imagename > 'output'(default name) - + :param imagename: string, filename/path of image file (drop this term if it is an image array) - :param filename: string, - - :return: string, name of file to be saved - ''' - rv = 'output' - if self.config.output != None and self.config.output != '': + :param filename: string, + + :return: string, name of file to be saved + """ + rv = "output" + if self.config.output != None and self.config.output != "": rv = self.config.output elif filename != None: rv = filename @@ -129,10 +135,10 @@ def _getSaveFileName(self, imagename=None, filename=None): return rv def _getPic(self, image, flip=None, correction=None): - ''' + """ load picture to 2d array - - :param image: could be a string, a list of string or a 2d array, + + :param image: could be a string, a list of string or a 2d array, if string, load the image file using the string as the path. 
if list of string, load the image files using the string as their path and sum them togethor @@ -142,9 +148,9 @@ def _getPic(self, image, flip=None, correction=None): Flip behavior is controlled in self.config :param correction: apply correction to the returned 2d array if None: correct on the string/list of string, not correct on the 2d array - + :return: 2d array of image - ''' + """ if isinstance(image, list): rv = np.zeros((self.config.ydimension, self.config.xdimension)) for imagefile in image: @@ -154,7 +160,9 @@ def _getPic(self, image, flip=None, correction=None): rv = self.loadimage.loadImage(image) if correction == None or correction == True: ce = self.config.cropedges - rv[ce[2]:-ce[3], ce[0]:-ce[1]] = rv[ce[2]:-ce[3], ce[0]:-ce[1]] * self.correction + rv[ce[2] : -ce[3], ce[0] : -ce[1]] = ( + rv[ce[2] : -ce[3], ce[0] : -ce[1]] * self.correction + ) # rv *= self.correction else: rv = image @@ -163,16 +171,26 @@ def _getPic(self, image, flip=None, correction=None): if correction == True: # rv *= self.correction ce = self.config.cropedges - rv[ce[2]:-ce[3], ce[0]:-ce[1]] = rv[ce[2]:-ce[3], ce[0]:-ce[1]] * self.correction - if rv.dtype.kind != 'f': + rv[ce[2] : -ce[3], ce[0] : -ce[1]] = ( + rv[ce[2] : -ce[3], ce[0] : -ce[1]] * self.correction + ) + if rv.dtype.kind != "f": rv = rv.astype(float) return rv - def integrate(self, image, savename=None, savefile=True, flip=None, correction=None, extramask=None): - ''' + def integrate( + self, + image, + savename=None, + savefile=True, + flip=None, + correction=None, + extramask=None, + ): + """ integrate 2d image to 1d diffraction pattern, then save to disk - - :param image: str or 2d array, + + :param image: str or 2d array, if str, then read image file using it as file name. if 2d array, integrate this 2d array. 
:param savename: str, name of file to save @@ -182,28 +200,38 @@ def integrate(self, image, savename=None, savefile=True, flip=None, correction=N Flip behavior is controlled in self.config :param correction: apply correction to the returned 2d array if None: correct on the string/list of string, not correct on the 2d array - :param extramask: 2d array, extra mask applied in integration - - :return: dict, rv['chi'] is a 2d array of integrated intensity, shape is (2, len of intensity) - or (3, len of intensity) in [tth or q, intensity, (uncertainty)]. rv['filename'] is the + :param extramask: 2d array, extra mask applied in integration + + :return: dict, rv['chi'] is a 2d array of integrated intensity, shape is (2, len of intensity) + or (3, len of intensity) in [tth or q, intensity, (uncertainty)]. rv['filename'] is the name of file to save to disk - ''' + """ rv = {} self.pic = self._getPic(image, flip, correction) - rv['filename'] = self._getSaveFileName(imagename=image, filename=savename) + rv["filename"] = self._getSaveFileName( + imagename=image, filename=savename + ) self._picChanged(extramask=extramask) # calculate - rv['chi'] = self.chi = self.calculate.intensity(self.pic) + rv["chi"] = self.chi = self.calculate.intensity(self.pic) # save if savefile: - rv['filename'] = self.saveresults.save(rv) + rv["filename"] = self.saveresults.save(rv) return rv - def integrateFilelist(self, filelist, summation=None, filename=None, flip=None, correction=None, extramask=None): - ''' + def integrateFilelist( + self, + filelist, + summation=None, + filename=None, + flip=None, + correction=None, + extramask=None, + ): + """ process all file in filelist, integrate them separately or together - + :param filelist: list of string, files to be integrated (full path) :param summation: bool or None, sum all files together or not, if None, use self.config.summation @@ -213,98 +241,118 @@ def integrateFilelist(self, filelist, summation=None, filename=None, flip=None, Flip behavior is 
controlled in self.config :param correction: apply correction to the returned 2d array if None: correct on the string/list of string, not correct on the 2d array - :param extramask: 2d array, extra mask applied in integration - - :return: list of dict, in each dict, rv['chi'] is a 2d array of integrated intensity, shape is (2, len of intensity) - or (3, len of intensity) as [tth or q, intensity, (uncertainty)]. rv['filename'] is the + :param extramask: 2d array, extra mask applied in integration + + :return: list of dict, in each dict, rv['chi'] is a 2d array of integrated intensity, shape is (2, len of intensity) + or (3, len of intensity) as [tth or q, intensity, (uncertainty)]. rv['filename'] is the name of file to save to disk - ''' + """ summation = self.config.summation if summation == None else summation - if (summation)and(len(filelist) > 1): + if (summation) and (len(filelist) > 1): image = self._getPic(filelist, flip, correction) if filename == None: if isinstance(filelist[-1], str): - filename = os.path.splitext(filelist[-1])[0] + '_sum.chi' + filename = os.path.splitext(filelist[-1])[0] + "_sum.chi" else: - filename = 'Sum_xrd.chi' - rv = [self.integrate(image, savename=filename, extramask=extramask)] + filename = "Sum_xrd.chi" + rv = [ + self.integrate(image, savename=filename, extramask=extramask) + ] else: i = 0 rv = [] for imagefile in filelist: if filename == None: - rvv = self.integrate(imagefile, flip=flip, correction=correction, extramask=extramask) + rvv = self.integrate( + imagefile, + flip=flip, + correction=correction, + extramask=extramask, + ) else: - rvv = self.integrate(imagefile, savename=filename + '%03d' % i, - flip=flip, correction=correction, extramask=extramask) + rvv = self.integrate( + imagefile, + savename=filename + "%03d" % i, + flip=flip, + correction=correction, + extramask=extramask, + ) rv.append(rvv) return rv def process(self): - ''' + """ process the images according to filenames/includepattern/excludepattern/summation 
- by default, it will scan current/tifdirectory and integrate all files match + by default, it will scan current/tifdirectory and integrate all files match includepattern/excludepattern and/or filenames. - + Usually this one is called from cmd line rather then script. - + :return: None - ''' + """ if not self.config.nocalculation: filelist = self.loadimage.genFileList() if len(filelist) > 0: self.prepareCalculation(pic=filelist[0]) self.integrateFilelist(filelist) else: - print 'No input files or configurations' + print("No input files or configurations") self.config.args.print_help() # mask creating - elif self.config.createmask != '': + elif self.config.createmask != "": self.createMask() # if no config is passed to srxplanar else: - print 'No input files or configurations' + print("No input files or configurations") self.config.args.print_help() return def createMask(self, filename=None, pic=None, addmask=None): - ''' + """ create and save a mask according to addmask, pic, 1 stands for masked pixel in saved file - + :param filename: name of mask file to save, 'mask.npy' if it is None :param pic: 2d image array, may used in generating dynamic mask, Be careful if this one is flipped or not :param addmask: list of str, control how to generate mask, see Mask module for detail - + :return: 2d array, 1 stands for masked pixel here - ''' + """ filename = self.config.createmask if filename == None else filename - filename = 'mask.npy' if filename == '' else filename + filename = "mask.npy" if filename == "" else filename addmask = self.config.addmask if addmask == None else addmask - if not hasattr(self, 'mask'): + if not hasattr(self, "mask"): self.mask = Mask(self.config) - if not hasattr(self, 'loadimage'): + if not hasattr(self, "loadimage"): self.loadimage = LoadImage(self.config) if pic == None: filelist = self.loadimage.genFileList() - if hasattr(self, 'pic'): + if hasattr(self, "pic"): if self.pic != None: pic = self.pic else: - pic = 
self.loadimage.loadImage(filelist[0]) if len(filelist) > 0 else None + pic = ( + self.loadimage.loadImage(filelist[0]) + if len(filelist) > 0 + else None + ) else: - pic = self.loadimage.loadImage(filelist[0]) if len(filelist) > 0 else None + pic = ( + self.loadimage.loadImage(filelist[0]) + if len(filelist) > 0 + else None + ) rv = self.mask.saveMask(filename, pic, addmask) return rv - def main(): - ''' + """ read config and integrate images - ''' + """ srxplanar = SrXplanar(args=sys.argv[1:]) srxplanar.process() return -if __name__ == '__main__': + +if __name__ == "__main__": sys.exit(main()) diff --git a/diffpy/srxplanar/srxplanarconfig.py b/diffpy/srxplanar/srxplanarconfig.py index 58ccce4..5916fe8 100644 --- a/diffpy/srxplanar/srxplanarconfig.py +++ b/diffpy/srxplanar/srxplanarconfig.py @@ -20,15 +20,17 @@ import argparse from diffpy.confutils.config import ConfigBase -from diffpy.confutils.tools import _configPropertyRad, _configPropertyR, _configPropertyRW +from diffpy.confutils.tools import ( + _configPropertyRad, + _configPropertyR, + _configPropertyRW, +) -_description = \ -''' +_description = """ SrXplanar -- integrate 2D powder diffraction image to 1D with unceratinty propagation -''' - # Text to display after the argument help -_epilog = \ -''' +""" +# Text to display after the argument help +_epilog = """ Examples: srxplanar KFe2As2-00838.tif -c test.cfg @@ -40,206 +42,449 @@ srxplanar --createconfig config.cfg --create default (short) config file using all default value -''' +""" _optdatalist = [ - # control group - ['filenames', {'sec':'Control', 'config':'n', 'header':'n', - 'f':'filename', - 'h':'filename or list of filenames or filename pattern or list of filename pattern', - 'n':'*', - 'd':[], }], - ['output', {'sec':'Experiment', 'config':'n', 'header':'n', - 's':'o', - 'h':'basename of output file', - 'd':'', }], - ['summation', {'sec':'Control', 'config':'n', 'header':'n', - 's':'s', - 'h':'sum all the image and then integrate', - 'n':'?', - 
'co':True, - 'd':False, }], - # Expeiment gropu - ['opendirectory', {'sec':'Control', 'header':'n', - 's':'opendir', - 'h':'directory of input 2D image files', - 'd':'currentdir', - 'tt':'directory'}], - ['savedirectory', {'sec':'Control', 'header':'n', - 's':'savedir', - 'h':'directory of output files', - 'd':'currentdir', - 'tt':'directory'}], - ['maskfile', {'sec':'Experiment', - 's':'mask', - 'h':'the mask file (support numpy .npy array, and tiff image, >0 stands for masked pixel)', - 'd':'', - 'tt':'file'}], - ['createmask', {'sec':'Control', 'config':'n', 'header':'n', - 'h':'create a mask file according to current image file and value of addmask', - 'd':'', }], - ['integrationspace', {'sec':'Experiment', - 'h':'the x-grid of integrated 1D diffraction data', - 'd':'twotheta', - 'c':['qspace', 'twotheta'], }], - ['wavelength', {'sec':'Experiment', - 'h':'wavelength of x-ray, in Angstrom', - 'd':0.1000, }], - ['xbeamcenter', {'sec':'Experiment', - 's':'xc', - 'h':'beamcenter in x axis, in pixel', - 'd':1024.0, }], - ['ybeamcenter', {'sec':'Experiment', - 's':'yc', - 'h':'beamcenter in y axis, in pixel', - 'd':1024.0, }], - ['distance', {'sec':'Experiment', - 's':'dis', - 'h':'distance between detector and sample, in mm', - 'd':200.0, }], - ['rotationd', {'sec':'Experiment', - 's':'rot', - 'h':'rotation angle of tilt plane, in degree', - 'd':0.0, }], - ['tiltd', {'sec':'Experiment', - 's':'tilt', - 'h':'tilt angle of tilt plane, in degree', - 'd':0.0, }], - ['tthstepd', {'sec':'Experiment', - 's':'ts', - 'h':'integration step in twotheta space, in degree', - 'd':0.02, }], - ['qstep', {'sec':'Experiment', - 's':'qs', - 'h':'integration step in q space, in Angstrom^-1', - 'd':0.02, }], - # Beamline group - ['includepattern', {'sec':'Beamline', 'header':'n', 'config':'f', - 's':'ipattern', - 'h':'list of string, file name patterns for included files', - 'n':'*', - 'd':['*.tif', '*.tif.bz2'], }], - ['excludepattern', {'sec':'Beamline', 'header':'n', 'config':'f', - 
's':'epattern', - 'h':'list of string, file name patterns for excluded files', - 'n':'*', - 'd':['*.dark.tif', '*.raw.tif'], }], - ['fliphorizontal', {'sec':'Beamline', - 'h':'filp the image horizontally', - 'n':'?', - 'co':True, - 'd':False, }], - ['flipvertical', {'sec':'Beamline', - 'h':'filp the image vertically', - 'n':'?', - 'co':True, - 'd':True, }], - ['xdimension', {'sec':'Beamline', - 's':'xd', - 'h':'detector dimension in x axis, in pixel', - 'd':2048, }], - ['ydimension', {'sec':'Beamline', - 's':'yd', - 'h':'detector dimension in y axis, in pixel', - 'd':2048, }], - ['xpixelsize', {'sec':'Beamline', - 's':'xp', - 'h':'detector pixel size in x axis, in mm', - 'd':0.2, }], - ['ypixelsize', {'sec':'Beamline', - 's':'yp', - 'h':'detector pixel size in y axis, in mm', - 'd':0.2, }], - # Others Group - ['uncertaintyenable', {'sec':'Others', - 's':'error', - 'h':'enable uncertainty propagation', - 'n':'?', - 'co':True, - 'd':True, }], - ['sacorrectionenable', {'sec':'Others', - 's':'sacorr', - 'h':'enable solid angle correction', - 'n':'?', - 'co':True, - 'd':True, }], - ['polcorrectionenable', {'sec':'Others', - 's':'polarcorr', - 'h':'enable polarization correction', - 'n':'?', - 'co':True, - 'd':True, }], - ['polcorrectf', {'sec':'Others', - 's':'polarf', - 'h':'polarization correction factor', - 'd':0.99, }], - ['brightpixelmask', {'sec':'Others', - 'h':'mask the bright pixel by comparing their local environments', - 'n':'?', - 'co':True, - 'd':True, }], - ['darkpixelmask', {'sec':'Others', - 'h':'mask the dark pixel by comparing their local environments', - 'n':'?', - 'co':True, - 'd':True, }], - ['avgmask', {'sec':'Others', - 'h':'create a dynamic averaging mask that mask pixel with too high or too low intensity compare to the pixels which have similar twotheta value', - 'n':'?', - 'co':True, - 'd':True, }], - ['gsasoutput', {'sec':'Others', 'header':'n', - 'h':'select if want to output gsas format file', - 'c':['None', 'std', 'esd', 'fxye'], - 
'd':'None', }], - ['filenameplus', {'sec':'Others', 'header':'n', - 'h':'string appended to the output filename', - 'd':'', }], - ['cropedges', {'sec':'Others', - 'h':'crop the image, maske pixels around the image edge (left, right, top, bottom), must larger than 0', - 'n':4, - 'tt':'array', - 't':'intlist', - 'd':[10, 10, 10, 10], }], - ['extracrop', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'crop the edge pixels, first four means the number of pixels masked in each edge \ + # control group + [ + "filenames", + { + "sec": "Control", + "config": "n", + "header": "n", + "f": "filename", + "h": "filename or list of filenames or filename pattern or list of filename pattern", + "n": "*", + "d": [], + }, + ], + [ + "output", + { + "sec": "Experiment", + "config": "n", + "header": "n", + "s": "o", + "h": "basename of output file", + "d": "", + }, + ], + [ + "summation", + { + "sec": "Control", + "config": "n", + "header": "n", + "s": "s", + "h": "sum all the image and then integrate", + "n": "?", + "co": True, + "d": False, + }, + ], + # Expeiment gropu + [ + "opendirectory", + { + "sec": "Control", + "header": "n", + "s": "opendir", + "h": "directory of input 2D image files", + "d": "currentdir", + "tt": "directory", + }, + ], + [ + "savedirectory", + { + "sec": "Control", + "header": "n", + "s": "savedir", + "h": "directory of output files", + "d": "currentdir", + "tt": "directory", + }, + ], + [ + "maskfile", + { + "sec": "Experiment", + "s": "mask", + "h": "the mask file (support numpy .npy array, and tiff image, >0 stands for masked pixel)", + "d": "", + "tt": "file", + }, + ], + [ + "createmask", + { + "sec": "Control", + "config": "n", + "header": "n", + "h": "create a mask file according to current image file and value of addmask", + "d": "", + }, + ], + [ + "integrationspace", + { + "sec": "Experiment", + "h": "the x-grid of integrated 1D diffraction data", + "d": "twotheta", + "c": ["qspace", "twotheta"], + }, + ], + [ + "wavelength", + { + 
"sec": "Experiment", + "h": "wavelength of x-ray, in Angstrom", + "d": 0.1000, + }, + ], + [ + "xbeamcenter", + { + "sec": "Experiment", + "s": "xc", + "h": "beamcenter in x axis, in pixel", + "d": 1024.0, + }, + ], + [ + "ybeamcenter", + { + "sec": "Experiment", + "s": "yc", + "h": "beamcenter in y axis, in pixel", + "d": 1024.0, + }, + ], + [ + "distance", + { + "sec": "Experiment", + "s": "dis", + "h": "distance between detector and sample, in mm", + "d": 200.0, + }, + ], + [ + "rotationd", + { + "sec": "Experiment", + "s": "rot", + "h": "rotation angle of tilt plane, in degree", + "d": 0.0, + }, + ], + [ + "tiltd", + { + "sec": "Experiment", + "s": "tilt", + "h": "tilt angle of tilt plane, in degree", + "d": 0.0, + }, + ], + [ + "tthstepd", + { + "sec": "Experiment", + "s": "ts", + "h": "integration step in twotheta space, in degree", + "d": 0.02, + }, + ], + [ + "qstep", + { + "sec": "Experiment", + "s": "qs", + "h": "integration step in q space, in Angstrom^-1", + "d": 0.02, + }, + ], + # Beamline group + [ + "includepattern", + { + "sec": "Beamline", + "header": "n", + "config": "f", + "s": "ipattern", + "h": "list of string, file name patterns for included files", + "n": "*", + "d": ["*.tif", "*.tif.bz2"], + }, + ], + [ + "excludepattern", + { + "sec": "Beamline", + "header": "n", + "config": "f", + "s": "epattern", + "h": "list of string, file name patterns for excluded files", + "n": "*", + "d": ["*.dark.tif", "*.raw.tif"], + }, + ], + [ + "fliphorizontal", + { + "sec": "Beamline", + "h": "filp the image horizontally", + "n": "?", + "co": True, + "d": False, + }, + ], + [ + "flipvertical", + { + "sec": "Beamline", + "h": "filp the image vertically", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "xdimension", + { + "sec": "Beamline", + "s": "xd", + "h": "detector dimension in x axis, in pixel", + "d": 2048, + }, + ], + [ + "ydimension", + { + "sec": "Beamline", + "s": "yd", + "h": "detector dimension in y axis, in pixel", + "d": 2048, + }, + ], + 
[ + "xpixelsize", + { + "sec": "Beamline", + "s": "xp", + "h": "detector pixel size in x axis, in mm", + "d": 0.2, + }, + ], + [ + "ypixelsize", + { + "sec": "Beamline", + "s": "yp", + "h": "detector pixel size in y axis, in mm", + "d": 0.2, + }, + ], + # Others Group + [ + "uncertaintyenable", + { + "sec": "Others", + "s": "error", + "h": "enable uncertainty propagation", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "sacorrectionenable", + { + "sec": "Others", + "s": "sacorr", + "h": "enable solid angle correction", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "polcorrectionenable", + { + "sec": "Others", + "s": "polarcorr", + "h": "enable polarization correction", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "polcorrectf", + { + "sec": "Others", + "s": "polarf", + "h": "polarization correction factor", + "d": 0.99, + }, + ], + [ + "brightpixelmask", + { + "sec": "Others", + "h": "mask the bright pixel by comparing their local environments", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "darkpixelmask", + { + "sec": "Others", + "h": "mask the dark pixel by comparing their local environments", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "avgmask", + { + "sec": "Others", + "h": "create a dynamic averaging mask that mask pixel with too high or too low intensity compare to the pixels which have similar twotheta value", + "n": "?", + "co": True, + "d": True, + }, + ], + [ + "gsasoutput", + { + "sec": "Others", + "header": "n", + "h": "select if want to output gsas format file", + "c": ["None", "std", "esd", "fxye"], + "d": "None", + }, + ], + [ + "filenameplus", + { + "sec": "Others", + "header": "n", + "h": "string appended to the output filename", + "d": "", + }, + ], + [ + "cropedges", + { + "sec": "Others", + "h": "crop the image, maske pixels around the image edge (left, right, top, bottom), must larger than 0", + "n": 4, + "tt": "array", + "t": "intlist", + "d": [10, 10, 10, 10], + }, + ], + [ + "extracrop", + { + 
"sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "crop the edge pixels, first four means the number of pixels masked in each edge \ (left, right, top, bottom), this crop is after all prepare calculation, \ -so change this value does not require a config update, value must larger than 0', - 'n':4, - 'tt':'array', - 't':'intlist', - 'd':[1, 1, 1, 1], }], - ['nocalculation', {'sec':'Others', 'config':'n', 'header':'n', - 'h':'set True to disable all calculation, will automaticly set True if createconfig or createmask', - 'n':'?', - 'co':True, - 'd':False, }], - # masking - ['brightpixelr', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'a threshold for masked pixels in bright pixel masking', - 'd':1.2, }], - ['brightpixelsize', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'size of local testing area in bright pixel masking', - 'd':5, }], - ['darkpixelr', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'a threshold for masked pixels in dark pixel masking', - 'd':0.1, }], - ['avgmaskhigh', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'a threshold for masked pixels in average masking, pixels with (self_int > avg_int * avgmaskhigh) will be masked', - 'd':2.0, }], - ['avgmasklow', {'sec':'Others', 'args':'n', 'config':'n', 'header':'n', - 'h':'a threshold for masked pixels in average masking, pixels with (self_int < avg_int * avgmasklow) will be masked', - 'd':0.5, }], - ] - -_defaultdata = {'configfile': ['srxplanar.cfg', 'SrXplanar.cfg'], - 'headertitle': 'SrXplanar configration' - } +so change this value does not require a config update, value must larger than 0", + "n": 4, + "tt": "array", + "t": "intlist", + "d": [1, 1, 1, 1], + }, + ], + [ + "nocalculation", + { + "sec": "Others", + "config": "n", + "header": "n", + "h": "set True to disable all calculation, will automaticly set True if createconfig or createmask", + "n": "?", + "co": True, + "d": False, + }, + ], + # masking + 
[ + "brightpixelr", + { + "sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "a threshold for masked pixels in bright pixel masking", + "d": 1.2, + }, + ], + [ + "brightpixelsize", + { + "sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "size of local testing area in bright pixel masking", + "d": 5, + }, + ], + [ + "darkpixelr", + { + "sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "a threshold for masked pixels in dark pixel masking", + "d": 0.1, + }, + ], + [ + "avgmaskhigh", + { + "sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "a threshold for masked pixels in average masking, pixels with (self_int > avg_int * avgmaskhigh) will be masked", + "d": 2.0, + }, + ], + [ + "avgmasklow", + { + "sec": "Others", + "args": "n", + "config": "n", + "header": "n", + "h": "a threshold for masked pixels in average masking, pixels with (self_int < avg_int * avgmasklow) will be masked", + "d": 0.5, + }, + ], +] + +_defaultdata = { + "configfile": ["srxplanar.cfg", "SrXplanar.cfg"], + "headertitle": "SrXplanar configration", +} + class SrXplanarConfig(ConfigBase): - ''' + """ config class, based on ConfigBase class in diffpy.confutils - ''' + """ # Text to display before the argument help _description = _description @@ -252,79 +497,79 @@ class SrXplanarConfig(ConfigBase): _defaultdata = _defaultdata def _preInit(self, **kwargs): - ''' + """ method called in init process, overload it! 
- + this method will be called before reading config from file/args/kwargs - + add degree/rad delegation for rotation, tilt, tthstep, tthmax - ''' + """ - for name in ['rotation', 'tilt', 'tthstep', 'tthmax']: - setattr(self.__class__, name, _configPropertyRad(name + 'd')) + for name in ["rotation", "tilt", "tthstep", "tthmax"]: + setattr(self.__class__, name, _configPropertyRad(name + "d")) # cls._configlist['Experiment'].extend(['rotation', 'tilt', 'tthstep', 'tthmax']) return def _preUpdateSelf(self, **kwargs): - ''' + """ additional process called in self._updateSelf, this method is called before self._copySelftoConfig(), i.e. before copy options value to self.config (config file) - + check the tthmaxd and qmax, and set tthorqmax, tthorqstep according to integration space - + :param kwargs: optional kwargs - ''' + """ self.tthmaxd, self.qmax = checkMax(self) - if self.integrationspace == 'twotheta': + if self.integrationspace == "twotheta": self.tthorqmax = self.tthmax self.tthorqstep = self.tthstep - elif self.integrationspace == 'qspace': + elif self.integrationspace == "qspace": self.tthorqmax = self.qmax self.tthorqstep = self.qstep - + self.cropedges = [a if a > 1 else 1 for a in self.cropedges] self.extracrop = [a if a > 1 else 1 for a in self.extracrop] return def _postUpdateConfig(self, **kwargs): - ''' - post processing after parse args or kwargs, this method is called after - in self._postPocessing and before creating config file action - + """ + post processing after parse args or kwargs, this method is called after + in self._postPocessing and before creating config file action + set nocalculatio flag when create config or create mask - + :param kwargs: optional kwargs - ''' + """ - if (self.createconfig != '')and(self.createconfig != None): + if (self.createconfig != "") and (self.createconfig != None): self.nocalculation = True - if (self.createconfigfull != '')and(self.createconfigfull != None): + if (self.createconfigfull != "") and 
(self.createconfigfull != None): self.nocalculation = True - if self.createmask != '': + if self.createmask != "": self.nocalculation = True return + def checkMax(config): - ''' + """ calculate the max twotheta angle (and q) of a detector with current geometry - + :param config: SrXplanarConfig, config instance stores the geometry parameters - + :return: [tthmaxd, qmax], max twotheta angle(in degree) and max q value of current detector. - ''' - xdimension = getattr(config, 'xdimension') - ydimension = getattr(config, 'ydimension') - xbeamcenter = getattr(config, 'xbeamcenter') - ybeamcenter = getattr(config, 'ybeamcenter') - xpixelsize = getattr(config, 'xpixelsize') - ypixelsize = getattr(config, 'ypixelsize') - rotation = getattr(config, 'rotation') - tilt = getattr(config, 'tilt') - distance = getattr(config, 'distance') - wavelength = getattr(config, 'wavelength') - + """ + xdimension = getattr(config, "xdimension") + ydimension = getattr(config, "ydimension") + xbeamcenter = getattr(config, "xbeamcenter") + ybeamcenter = getattr(config, "ybeamcenter") + xpixelsize = getattr(config, "xpixelsize") + ypixelsize = getattr(config, "ypixelsize") + rotation = getattr(config, "rotation") + tilt = getattr(config, "tilt") + distance = getattr(config, "distance") + wavelength = getattr(config, "wavelength") xr = (np.array([0, xdimension + 1]) - xbeamcenter) * xpixelsize yr = (np.array([0, ydimension + 1]) - ybeamcenter) * ypixelsize @@ -336,11 +581,17 @@ def checkMax(config): sourceyr = -distance * sint * sinr sourcezr = distance * cost - dmatrix = ((xr - sourcexr) ** 2).reshape(1, 2) + \ - ((yr - sourceyr) ** 2).reshape(2, 1) + sourcezr ** 2 + dmatrix = ( + ((xr - sourcexr) ** 2).reshape(1, 2) + + ((yr - sourceyr) ** 2).reshape(2, 1) + + sourcezr**2 + ) dmatrix = np.sqrt(dmatrix) - tthmatrix1 = ((-xr + sourcexr) * sourcexr).reshape(1, 2) + \ - ((-yr + sourceyr) * sourceyr).reshape(2, 1) + sourcezr * sourcezr + tthmatrix1 = ( + ((-xr + sourcexr) * sourcexr).reshape(1, 2) 
+ + ((-yr + sourceyr) * sourceyr).reshape(2, 1) + + sourcezr * sourcezr + ) tthmatrix = np.arccos(tthmatrix1 / dmatrix / distance) qmatrix = 4 * np.pi * np.sin(tthmatrix / 2.0) / wavelength @@ -348,9 +599,10 @@ def checkMax(config): qmax = np.max(qmatrix) + 0.1 return tthmaxd, qmax + SrXplanarConfig.initConfigClass() -if __name__ == '__main__': +if __name__ == "__main__": a = SrXplanarConfig() a.updateConfig() - a.writeConfig('test.cfg') + a.writeConfig("test.cfg") diff --git a/diffpy/srxplanar/tifffile.py b/diffpy/srxplanar/tifffile.py index 6a2e4c9..74cb58f 100644 --- a/diffpy/srxplanar/tifffile.py +++ b/diffpy/srxplanar/tifffile.py @@ -151,14 +151,24 @@ import numpy -__version__ = '2014.02.05' -__docformat__ = 'restructuredtext en' -__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence'] - - -def imsave(filename, data, photometric=None, planarconfig=None, - resolution=None, description=None, software='tifffile.py', - byteorder=None, bigtiff=False, compress=0, extratags=()): +__version__ = "2014.02.05" +__docformat__ = "restructuredtext en" +__all__ = ["imsave", "imread", "imshow", "TiffFile", "TiffSequence"] + + +def imsave( + filename, + data, + photometric=None, + planarconfig=None, + resolution=None, + description=None, + software="tifffile.py", + byteorder=None, + bigtiff=False, + compress=0, + extratags=(), +): """Write image data to TIFF file. Image data are written in one stripe per plane. 
@@ -222,47 +232,47 @@ def imsave(filename, data, photometric=None, planarconfig=None, >>> imsave('temp.tif', data, extratags=[(270, 's', 0, value, True)]) """ - assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb')) - assert(planarconfig in (None, 'contig', 'planar')) - assert(byteorder in (None, '<', '>')) - assert(0 <= compress <= 9) + assert photometric in (None, "minisblack", "miniswhite", "rgb") + assert planarconfig in (None, "contig", "planar") + assert byteorder in (None, "<", ">") + assert 0 <= compress <= 9 if byteorder is None: - byteorder = '<' if sys.byteorder == 'little' else '>' + byteorder = "<" if sys.byteorder == "little" else ">" - data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C') + data = numpy.asarray(data, dtype=byteorder + data.dtype.char, order="C") data_shape = shape = data.shape data = numpy.atleast_2d(data) - if not bigtiff and data.size * data.dtype.itemsize < 2000*2**20: + if not bigtiff and data.size * data.dtype.itemsize < 2000 * 2**20: bigtiff = False offset_size = 4 tag_size = 12 - numtag_format = 'H' - offset_format = 'I' - val_format = '4s' + numtag_format = "H" + offset_format = "I" + val_format = "4s" else: bigtiff = True offset_size = 8 tag_size = 20 - numtag_format = 'Q' - offset_format = 'Q' - val_format = '8s' + numtag_format = "Q" + offset_format = "Q" + val_format = "8s" # unify shape of data samplesperpixel = 1 extrasamples = 0 if photometric is None: if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)): - photometric = 'rgb' + photometric = "rgb" else: - photometric = 'minisblack' - if photometric == 'rgb': + photometric = "minisblack" + if photometric == "rgb": if len(shape) < 3: raise ValueError("not a RGB(A) image") if planarconfig is None: - planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig' - if planarconfig == 'contig': + planarconfig = "planar" if shape[-3] in (3, 4) else "contig" + if planarconfig == "contig": if shape[-1] not in (3, 4): raise ValueError("not 
a contiguous RGB(A) image") data = data.reshape((-1, 1) + shape[-3:]) @@ -270,16 +280,16 @@ def imsave(filename, data, photometric=None, planarconfig=None, else: if shape[-3] not in (3, 4): raise ValueError("not a planar RGB(A) image") - data = data.reshape((-1, ) + shape[-3:] + (1, )) + data = data.reshape((-1,) + shape[-3:] + (1,)) samplesperpixel = shape[-3] if samplesperpixel == 4: extrasamples = 1 elif planarconfig and len(shape) > 2: - if planarconfig == 'contig': + if planarconfig == "contig": data = data.reshape((-1, 1) + shape[-3:]) samplesperpixel = shape[-1] else: - data = data.reshape((-1, ) + shape[-3:] + (1, )) + data = data.reshape((-1,) + shape[-3:] + (1,)) samplesperpixel = shape[-3] extrasamples = samplesperpixel - 1 else: @@ -287,28 +297,61 @@ def imsave(filename, data, photometric=None, planarconfig=None, # remove trailing 1s while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] - data = data.reshape((-1, 1) + shape[-2:] + (1, )) + data = data.reshape((-1, 1) + shape[-2:] + (1,)) shape = data.shape # (pages, planes, height, width, contig samples) - bytestr = bytes if sys.version[0] == '2' else ( - lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x) - tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, - 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} + bytestr = ( + bytes + if sys.version[0] == "2" + else (lambda x: bytes(x, "utf-8") if isinstance(x, str) else x) + ) + tifftypes = { + "B": 1, + "s": 2, + "H": 3, + "I": 4, + "2I": 5, + "b": 6, + "h": 8, + "i": 9, + "f": 11, + "d": 12, + "Q": 16, + "q": 17, + } tifftags = { - 'new_subfile_type': 254, 'subfile_type': 255, - 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258, - 'compression': 259, 'photometric': 262, 'fill_order': 266, - 'document_name': 269, 'image_description': 270, 'strip_offsets': 273, - 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278, - 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283, - 
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296, - 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320, - 'extra_samples': 338, 'sample_format': 339} + "new_subfile_type": 254, + "subfile_type": 255, + "image_width": 256, + "image_length": 257, + "bits_per_sample": 258, + "compression": 259, + "photometric": 262, + "fill_order": 266, + "document_name": 269, + "image_description": 270, + "strip_offsets": 273, + "orientation": 274, + "samples_per_pixel": 277, + "rows_per_strip": 278, + "strip_byte_counts": 279, + "x_resolution": 282, + "y_resolution": 283, + "planar_configuration": 284, + "page_name": 285, + "resolution_unit": 296, + "software": 305, + "datetime": 306, + "predictor": 317, + "color_map": 320, + "extra_samples": 338, + "sample_format": 339, + } tags = [] # list of (code, ifdentry, ifdvalue, writeonce) def pack(fmt, *val): - return struct.pack(byteorder+fmt, *val) + return struct.pack(byteorder + fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # compute ifdentry and ifdvalue bytes from code, dtype, count, value @@ -318,26 +361,25 @@ def addtag(code, dtype, count, value, writeonce=False): raise ValueError("unknown dtype %s" % dtype) tifftype = tifftypes[dtype] rawcount = count - if dtype == 's': - value = bytestr(value) + b'\0' + if dtype == "s": + value = bytestr(value) + b"\0" count = rawcount = len(value) - value = (value, ) + value = (value,) if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] - ifdentry = [pack('HH', code, tifftype), - pack(offset_format, rawcount)] + ifdentry = [pack("HH", code, tifftype), pack(offset_format, rawcount)] ifdvalue = None if count == 1: if isinstance(value, (tuple, list)): value = value[0] ifdentry.append(pack(val_format, pack(dtype, value))) elif struct.calcsize(dtype) * count <= offset_size: - ifdentry.append(pack(val_format, pack(str(count)+dtype, *value))) + ifdentry.append(pack(val_format, pack(str(count) + dtype, *value))) else: 
ifdentry.append(pack(offset_format, 0)) - ifdvalue = pack(str(count)+dtype, *value) - tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) + ifdvalue = pack(str(count) + dtype, *value) + tags.append((code, b"".join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): # return nominator and denominator from float or two integers @@ -349,48 +391,72 @@ def rational(arg, max_denominator=1000000): return f.numerator, f.denominator if software: - addtag('software', 's', 0, software, writeonce=True) + addtag("software", "s", 0, software, writeonce=True) if description: - addtag('image_description', 's', 0, description, writeonce=True) + addtag("image_description", "s", 0, description, writeonce=True) elif shape != data_shape: - addtag('image_description', 's', 0, - "shape=(%s)" % (",".join('%i' % i for i in data_shape)), - writeonce=True) - addtag('datetime', 's', 0, - datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"), - writeonce=True) - addtag('compression', 'H', 1, 32946 if compress else 1) - addtag('orientation', 'H', 1, 1) - addtag('image_width', 'I', 1, shape[-2]) - addtag('image_length', 'I', 1, shape[-3]) - addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2) - addtag('sample_format', 'H', 1, - {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind]) - addtag('photometric', 'H', 1, - {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric]) - addtag('samples_per_pixel', 'H', 1, samplesperpixel) + addtag( + "image_description", + "s", + 0, + "shape=(%s)" % (",".join("%i" % i for i in data_shape)), + writeonce=True, + ) + addtag( + "datetime", + "s", + 0, + datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"), + writeonce=True, + ) + addtag("compression", "H", 1, 32946 if compress else 1) + addtag("orientation", "H", 1, 1) + addtag("image_width", "I", 1, shape[-2]) + addtag("image_length", "I", 1, shape[-3]) + addtag("new_subfile_type", "I", 1, 0 if shape[0] == 1 else 2) + addtag( + "sample_format", + "H", + 1, + {"u": 1, 
"i": 2, "f": 3, "c": 6}[data.dtype.kind], + ) + addtag( + "photometric", + "H", + 1, + {"miniswhite": 0, "minisblack": 1, "rgb": 2}[photometric], + ) + addtag("samples_per_pixel", "H", 1, samplesperpixel) if planarconfig: - addtag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' - else 2) - addtag('bits_per_sample', 'H', samplesperpixel, - (data.dtype.itemsize * 8, ) * samplesperpixel) + addtag( + "planar_configuration", + "H", + 1, + 1 if planarconfig == "contig" else 2, + ) + addtag( + "bits_per_sample", + "H", + samplesperpixel, + (data.dtype.itemsize * 8,) * samplesperpixel, + ) else: - addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8) + addtag("bits_per_sample", "H", 1, data.dtype.itemsize * 8) if extrasamples: - if photometric == 'rgb': - addtag('extra_samples', 'H', 1, 1) # alpha channel + if photometric == "rgb": + addtag("extra_samples", "H", 1, 1) # alpha channel else: - addtag('extra_samples', 'H', extrasamples, (0, ) * extrasamples) + addtag("extra_samples", "H", extrasamples, (0,) * extrasamples) if resolution: - addtag('x_resolution', '2I', 1, rational(resolution[0])) - addtag('y_resolution', '2I', 1, rational(resolution[1])) - addtag('resolution_unit', 'H', 1, 2) - addtag('rows_per_strip', 'I', 1, shape[-3]) + addtag("x_resolution", "2I", 1, rational(resolution[0])) + addtag("y_resolution", "2I", 1, rational(resolution[1])) + addtag("resolution_unit", "H", 1, 2) + addtag("rows_per_strip", "I", 1, shape[-3]) # use one strip per plane - strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1] - addtag('strip_byte_counts', offset_format, shape[1], strip_byte_counts) - addtag('strip_offsets', offset_format, shape[1], (0, ) * shape[1]) + strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1] + addtag("strip_byte_counts", offset_format, shape[1], strip_byte_counts) + addtag("strip_offsets", offset_format, shape[1], (0,) * shape[1]) # add extra tags from users for t in extratags: @@ -399,18 +465,18 @@ def 
rational(arg, max_denominator=1000000): # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) - with open(filename, 'wb') as fh: + with open(filename, "wb") as fh: seek = fh.seek tell = fh.tell def write(arg, *args): fh.write(pack(arg, *args) if args else arg) - write({'<': b'II', '>': b'MM'}[byteorder]) + write({"<": b"II", ">": b"MM"}[byteorder]) if bigtiff: - write('HHH', 43, 8, 0) + write("HHH", 43, 8, 0) else: - write('H', 42) + write("H", 42) ifd_offset = tell() write(offset_format, 0) # first IFD @@ -424,7 +490,7 @@ def write(arg, *args): # write ifdentries write(numtag_format, len(tags)) tag_offset = tell() - write(b''.join(t[1] for t in tags)) + write(b"".join(t[1] for t in tags)) ifd_offset = tell() write(offset_format, 0) # offset to next IFD @@ -432,7 +498,7 @@ def write(arg, *args): for tagindex, tag in enumerate(tags): if tag[2]: pos = tell() - seek(tag_offset + tagindex*tag_size + offset_size + 4) + seek(tag_offset + tagindex * tag_size + offset_size + 4) write(offset_format, pos) seek(pos) if tag[0] == 273: @@ -465,7 +531,9 @@ def write(arg, *args): write(offset_format, strip_offset) strip_offset += size else: - seek(tag_offset + tagindex*tag_size + offset_size + 4) + seek( + tag_offset + tagindex * tag_size + offset_size + 4 + ) write(offset_format, data_offset) elif tag[0] == 279: # strip_byte_counts if compress: @@ -474,8 +542,12 @@ def write(arg, *args): for size in strip_byte_counts: write(offset_format, size) else: - seek(tag_offset + tagindex*tag_size + - offset_size + 4) + seek( + tag_offset + + tagindex * tag_size + + offset_size + + 4 + ) write(offset_format, strip_byte_counts[0]) break seek(pos) @@ -515,20 +587,20 @@ def imread(files, *args, **kwargs): """ kwargs_file = {} - if 'multifile' in kwargs: - kwargs_file['multifile'] = kwargs['multifile'] - del kwargs['multifile'] + if "multifile" in kwargs: + kwargs_file["multifile"] = kwargs["multifile"] + del kwargs["multifile"] else: - 
kwargs_file['multifile'] = True + kwargs_file["multifile"] = True kwargs_seq = {} - if 'pattern' in kwargs: - kwargs_seq['pattern'] = kwargs['pattern'] - del kwargs['pattern'] + if "pattern" in kwargs: + kwargs_seq["pattern"] = kwargs["pattern"] + del kwargs["pattern"] - if isinstance(files, basestring) and any(i in files for i in '?*'): + if isinstance(files, basestring) and any(i in files for i in "?*"): files = glob.glob(files) if not files: - raise ValueError('no files found') + raise ValueError("no files found") if len(files) == 1: files = files[0] @@ -542,7 +614,8 @@ def imread(files, *args, **kwargs): class lazyattr(object): """Lazy object attribute whose value is computed on first access.""" - __slots__ = ('func', ) + + __slots__ = ("func",) def __init__(self, func): self.func = func @@ -585,6 +658,7 @@ class TiffFile(object): ... tif.close() """ + def __init__(self, arg, name=None, multifile=False): """Initialize instance from file. @@ -601,7 +675,7 @@ def __init__(self, arg, name=None, multifile=False): """ if isinstance(arg, basestring): filename = os.path.abspath(arg) - self._fh = open(filename, 'rb') + self._fh = open(filename, "rb") else: filename = str(name) self._fh = arg @@ -633,13 +707,14 @@ def _fromfile(self): """Read TIFF header and all page records from file.""" self._fh.seek(0) try: - self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)] + self.byteorder = {b"II": "<", b"MM": ">"}[self._fh.read(2)] except KeyError: raise ValueError("not a valid TIFF file") - version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0] + version = struct.unpack(self.byteorder + "H", self._fh.read(2))[0] if version == 43: # BigTiff - self.offset_size, zero = struct.unpack(self.byteorder+'HH', - self._fh.read(4)) + self.offset_size, zero = struct.unpack( + self.byteorder + "HH", self._fh.read(4) + ) if zero or self.offset_size != 8: raise ValueError("not a valid BigTIFF file") elif version == 42: @@ -667,69 +742,111 @@ def series(self): if self.is_ome: 
series = self._omeseries() elif self.is_fluoview: - dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T', - b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R', - b'EVENT': 'V', b'EXPOSURE': 'L'} + dims = { + b"X": "X", + b"Y": "Y", + b"Z": "Z", + b"T": "T", + b"WAVELENGTH": "C", + b"TIME": "T", + b"XY": "R", + b"EVENT": "V", + b"EXPOSURE": "L", + } mmhd = list(reversed(self.pages[0].mm_header.dimensions)) - series = [Record( - axes=''.join(dims.get(i[0].strip().upper(), 'Q') - for i in mmhd if i[1] > 1), - shape=tuple(int(i[1]) for i in mmhd if i[1] > 1), - pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))] + series = [ + Record( + axes="".join( + dims.get(i[0].strip().upper(), "Q") + for i in mmhd + if i[1] > 1 + ), + shape=tuple(int(i[1]) for i in mmhd if i[1] > 1), + pages=self.pages, + dtype=numpy.dtype(self.pages[0].dtype), + ) + ] elif self.is_lsm: lsmi = self.pages[0].cz_lsm_info axes = CZ_SCAN_TYPES[lsmi.scan_type] if self.pages[0].is_rgb: - axes = axes.replace('C', '').replace('XY', 'XYC') + axes = axes.replace("C", "").replace("XY", "XYC") axes = axes[::-1] shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes] pages = [p for p in self.pages if not p.is_reduced] - series = [Record(axes=axes, shape=shape, pages=pages, - dtype=numpy.dtype(pages[0].dtype))] + series = [ + Record( + axes=axes, + shape=shape, + pages=pages, + dtype=numpy.dtype(pages[0].dtype), + ) + ] if len(pages) != len(self.pages): # reduced RGB pages pages = [p for p in self.pages if p.is_reduced] cp = 1 i = 0 - while cp < len(pages) and i < len(shape)-2: + while cp < len(pages) and i < len(shape) - 2: cp *= shape[i] i += 1 shape = shape[:i] + list(pages[0].shape) - axes = axes[:i] + 'CYX' - series.append(Record(axes=axes, shape=shape, pages=pages, - dtype=numpy.dtype(pages[0].dtype))) + axes = axes[:i] + "CYX" + series.append( + Record( + axes=axes, + shape=shape, + pages=pages, + dtype=numpy.dtype(pages[0].dtype), + ) + ) elif self.is_imagej: shape = [] axes = [] ij = 
self.pages[0].imagej_tags - if 'frames' in ij: - shape.append(ij['frames']) - axes.append('T') - if 'slices' in ij: - shape.append(ij['slices']) - axes.append('Z') - if 'channels' in ij and not self.is_rgb: - shape.append(ij['channels']) - axes.append('C') + if "frames" in ij: + shape.append(ij["frames"]) + axes.append("T") + if "slices" in ij: + shape.append(ij["slices"]) + axes.append("Z") + if "channels" in ij and not self.is_rgb: + shape.append(ij["channels"]) + axes.append("C") remain = len(self.pages) // (numpy.prod(shape) if shape else 1) if remain > 1: shape.append(remain) - axes.append('I') + axes.append("I") shape.extend(self.pages[0].shape) axes.extend(self.pages[0].axes) - axes = ''.join(axes) - series = [Record(pages=self.pages, shape=shape, axes=axes, - dtype=numpy.dtype(self.pages[0].dtype))] + axes = "".join(axes) + series = [ + Record( + pages=self.pages, + shape=shape, + axes=axes, + dtype=numpy.dtype(self.pages[0].dtype), + ) + ] elif self.is_nih: - series = [Record(pages=self.pages, - shape=(len(self.pages),) + self.pages[0].shape, - axes='I' + self.pages[0].axes, - dtype=numpy.dtype(self.pages[0].dtype))] + series = [ + Record( + pages=self.pages, + shape=(len(self.pages),) + self.pages[0].shape, + axes="I" + self.pages[0].axes, + dtype=numpy.dtype(self.pages[0].dtype), + ) + ] elif self.pages[0].is_shaped: - shape = self.pages[0].tags['image_description'].value[7:-1] - shape = tuple(int(i) for i in shape.split(b',')) - series = [Record(pages=self.pages, shape=shape, - axes='Q' * len(shape), - dtype=numpy.dtype(self.pages[0].dtype))] + shape = self.pages[0].tags["image_description"].value[7:-1] + shape = tuple(int(i) for i in shape.split(b",")) + series = [ + Record( + pages=self.pages, + shape=shape, + axes="Q" * len(shape), + dtype=numpy.dtype(self.pages[0].dtype), + ) + ] if not series: shapes = [] @@ -737,20 +854,28 @@ def series(self): for page in self.pages: if not page.shape: continue - shape = page.shape + (page.axes, - page.compression 
in TIFF_DECOMPESSORS) + shape = page.shape + ( + page.axes, + page.compression in TIFF_DECOMPESSORS, + ) if not shape in pages: shapes.append(shape) pages[shape] = [page] else: pages[shape].append(page) - series = [Record(pages=pages[s], - axes=(('I' + s[-2]) - if len(pages[s]) > 1 else s[-2]), - dtype=numpy.dtype(pages[s][0].dtype), - shape=((len(pages[s]), ) + s[:-2] - if len(pages[s]) > 1 else s[:-2])) - for s in shapes] + series = [ + Record( + pages=pages[s], + axes=(("I" + s[-2]) if len(pages[s]) > 1 else s[-2]), + dtype=numpy.dtype(pages[s][0].dtype), + shape=( + (len(pages[s]),) + s[:-2] + if len(pages[s]) > 1 + else s[:-2] + ), + ) + for s in shapes + ] return series def asarray(self, key=None, series=None, memmap=False): @@ -791,7 +916,8 @@ def asarray(self, key=None, series=None, memmap=False): elif self.is_nih: result = numpy.vstack( p.asarray(colormapped=False, squeeze=False, memmap=memmap) - for p in pages) + for p in pages + ) if pages[0].is_palette: result = numpy.take(pages[0].color_map, result, axis=1) result = numpy.swapaxes(result, 0, 1) @@ -799,14 +925,17 @@ def asarray(self, key=None, series=None, memmap=False): if self.is_ome and any(p is None for p in pages): firstpage = next(p for p in pages if p) nopage = numpy.zeros_like(firstpage.asarray(memmap=memmap)) - result = numpy.vstack((p.asarray(memmap=memmap) if p else nopage) - for p in pages) + result = numpy.vstack( + (p.asarray(memmap=memmap) if p else nopage) for p in pages + ) if key is None: try: result.shape = self.series[series].shape except ValueError: - warnings.warn("failed to reshape %s to %s" % ( - result.shape, self.series[series].shape)) + warnings.warn( + "failed to reshape %s to %s" + % (result.shape, self.series[series].shape) + ) result.shape = (-1,) + pages[0].shape else: result.shape = (-1,) + pages[0].shape @@ -814,63 +943,68 @@ def asarray(self, key=None, series=None, memmap=False): def _omeseries(self): """Return image series in OME-TIFF file(s).""" - root = 
ElementTree.XML(self.pages[0].tags['image_description'].value) - uuid = root.attrib.get('UUID', None) + root = ElementTree.XML(self.pages[0].tags["image_description"].value) + uuid = root.attrib.get("UUID", None) self._tiffs = {uuid: self} modulo = {} result = [] for element in root: - if element.tag.endswith('BinaryOnly'): + if element.tag.endswith("BinaryOnly"): warnings.warn("not an OME-TIFF master file") break - if element.tag.endswith('StructuredAnnotations'): + if element.tag.endswith("StructuredAnnotations"): for annot in element: - if not annot.attrib.get('Namespace', - '').endswith('modulo'): + if not annot.attrib.get("Namespace", "").endswith( + "modulo" + ): continue for value in annot: for modul in value: for along in modul: - if not along.tag[:-1].endswith('Along'): + if not along.tag[:-1].endswith("Along"): continue axis = along.tag[-1] - newaxis = along.attrib.get('Type', 'other') + newaxis = along.attrib.get("Type", "other") newaxis = AXES_LABELS[newaxis] - if 'Start' in along.attrib: + if "Start" in along.attrib: labels = range( - int(along.attrib['Start']), - int(along.attrib['End']) + 1, - int(along.attrib.get('Step', 1))) + int(along.attrib["Start"]), + int(along.attrib["End"]) + 1, + int(along.attrib.get("Step", 1)), + ) else: - labels = [label.text for label in along - if label.tag.endswith('Label')] + labels = [ + label.text + for label in along + if label.tag.endswith("Label") + ] modulo[axis] = (newaxis, labels) - if not element.tag.endswith('Image'): + if not element.tag.endswith("Image"): continue for pixels in element: - if not pixels.tag.endswith('Pixels'): + if not pixels.tag.endswith("Pixels"): continue atr = pixels.attrib - axes = "".join(reversed(atr['DimensionOrder'])) - shape = list(int(atr['Size'+ax]) for ax in axes) + axes = "".join(reversed(atr["DimensionOrder"])) + shape = list(int(atr["Size" + ax]) for ax in axes) size = numpy.prod(shape[:-2]) ifds = [None] * size for data in pixels: - if not data.tag.endswith('TiffData'): + 
if not data.tag.endswith("TiffData"): continue atr = data.attrib - ifd = int(atr.get('IFD', 0)) - num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0)) - num = int(atr.get('PlaneCount', num)) - idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]] + ifd = int(atr.get("IFD", 0)) + num = int(atr.get("NumPlanes", 1 if "IFD" in atr else 0)) + num = int(atr.get("PlaneCount", num)) + idx = [int(atr.get("First" + ax, 0)) for ax in axes[:-2]] idx = numpy.ravel_multi_index(idx, shape[:-2]) for uuid in data: - if uuid.tag.endswith('UUID'): + if uuid.tag.endswith("UUID"): if uuid.text not in self._tiffs: if not self._multifile: # abort reading multi file OME series return [] - fn = uuid.attrib['FileName'] + fn = uuid.attrib["FileName"] try: tf = TiffFile(os.path.join(self.fpath, fn)) except (IOError, ValueError): @@ -891,8 +1025,14 @@ def _omeseries(self): ifds[idx + i] = pages[ifd + i] except IndexError: warnings.warn("ome-xml: index out of range") - result.append(Record(axes=axes, shape=shape, pages=ifds, - dtype=numpy.dtype(ifds[0].dtype))) + result.append( + Record( + axes=axes, + shape=shape, + pages=ifds, + dtype=numpy.dtype(ifds[0].dtype), + ) + ) for record in result: for axis, (newaxis, labels) in modulo.items(): @@ -902,8 +1042,8 @@ def _omeseries(self): record.axes = record.axes.replace(axis, newaxis, 1) else: record.shape[i] //= size - record.shape.insert(i+1, size) - record.axes = record.axes.replace(axis, axis+newaxis, 1) + record.shape.insert(i + 1, size) + record.axes = record.axes.replace(axis, axis + newaxis, 1) return result @@ -924,7 +1064,8 @@ def __str__(self): result = [ self.fname.capitalize(), format_size(self._fsize), - {'<': 'little endian', '>': 'big endian'}[self.byteorder]] + {"<": "little endian", ">": "big endian"}[self.byteorder], + ] if self.is_bigtiff: result.append("bigtiff") if len(self.pages) > 1: @@ -1030,6 +1171,7 @@ class TiffPage(object): All attributes are read-only. 
""" + def __init__(self, parent): """Initialize instance from file.""" self.parent = parent @@ -1055,7 +1197,7 @@ def _fromfile(self): byteorder = self.parent.byteorder offset_size = self.parent.offset_size - fmt = {4: 'I', 8: 'Q'}[offset_size] + fmt = {4: "I", 8: "Q"}[offset_size] offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0] if not offset: raise StopIteration() @@ -1063,7 +1205,7 @@ def _fromfile(self): # read standard tags tags = self.tags fh.seek(offset) - fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size] + fmt, size = {4: ("H", 2), 8: ("Q", 8)}[offset_size] try: numtags = struct.unpack(byteorder + fmt, fh.read(size))[0] except Exception: @@ -1085,7 +1227,7 @@ def _fromfile(self): else: # some files contain multiple IFD with same code # e.g. MicroManager files contain two image_description - for ext in ('_1', '_2', '_3'): + for ext in ("_1", "_2", "_3"): name = tag.name + ext if not name in tags: tags[name] = tag @@ -1096,14 +1238,14 @@ def _fromfile(self): pos = fh.tell() for name, reader in CZ_LSM_INFO_READERS.items(): try: - offset = self.cz_lsm_info['offset_'+name] + offset = self.cz_lsm_info["offset_" + name] except KeyError: continue if not offset: continue fh.seek(offset) try: - setattr(self, 'cz_lsm_'+name, reader(fh, byteorder)) + setattr(self, "cz_lsm_" + name, reader(fh, byteorder)) except ValueError: pass fh.seek(pos) @@ -1117,46 +1259,57 @@ def _process_tags(self): tags = self.tags for code, (name, default, dtype, count, validate) in TIFF_TAGS.items(): if not (name in tags or default is None): - tags[name] = TiffTag(code, dtype=dtype, count=count, - value=default, name=name) + tags[name] = TiffTag( + code, dtype=dtype, count=count, value=default, name=name + ) if name in tags and validate: try: if tags[name].count == 1: setattr(self, name, validate[tags[name].value]) else: - setattr(self, name, tuple( - validate[value] for value in tags[name].value)) + setattr( + self, + name, + tuple( + validate[value] for value in 
tags[name].value + ), + ) except KeyError: - raise ValueError("%s.value (%s) not supported" % - (name, tags[name].value)) + raise ValueError( + "%s.value (%s) not supported" + % (name, tags[name].value) + ) - tag = tags['bits_per_sample'] + tag = tags["bits_per_sample"] if tag.count == 1: self.bits_per_sample = tag.value else: - value = tag.value[:self.samples_per_pixel] - if any((v-value[0] for v in value)): + value = tag.value[: self.samples_per_pixel] + if any((v - value[0] for v in value)): self.bits_per_sample = value else: self.bits_per_sample = value[0] - tag = tags['sample_format'] + tag = tags["sample_format"] if tag.count == 1: self.sample_format = TIFF_SAMPLE_FORMATS[tag.value] else: - value = tag.value[:self.samples_per_pixel] - if any((v-value[0] for v in value)): + value = tag.value[: self.samples_per_pixel] + if any((v - value[0] for v in value)): self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value] else: self.sample_format = TIFF_SAMPLE_FORMATS[value[0]] - if not 'photometric' in tags: + if not "photometric" in tags: self.photometric = None - if 'image_length' in tags: - self.strips_per_image = int(math.floor( - float(self.image_length + self.rows_per_strip - 1) / - self.rows_per_strip)) + if "image_length" in tags: + self.strips_per_image = int( + math.floor( + float(self.image_length + self.rows_per_strip - 1) + / self.rows_per_strip + ) + ) else: self.strips_per_image = 0 @@ -1165,37 +1318,40 @@ def _process_tags(self): if self.is_imagej: # consolidate imagej meta data - if 'image_description_1' in self.tags: # MicroManager - adict = imagej_description(tags['image_description_1'].value) + if "image_description_1" in self.tags: # MicroManager + adict = imagej_description(tags["image_description_1"].value) else: - adict = imagej_description(tags['image_description'].value) - if 'imagej_metadata' in tags: + adict = imagej_description(tags["image_description"].value) + if "imagej_metadata" in tags: try: - adict.update(imagej_metadata( - 
tags['imagej_metadata'].value, - tags['imagej_byte_counts'].value, - self.parent.byteorder)) + adict.update( + imagej_metadata( + tags["imagej_metadata"].value, + tags["imagej_byte_counts"].value, + self.parent.byteorder, + ) + ) except Exception as e: warnings.warn(str(e)) self.imagej_tags = Record(adict) - if not 'image_length' in self.tags or not 'image_width' in self.tags: + if not "image_length" in self.tags or not "image_width" in self.tags: # some GEL file pages are missing image data self.image_length = 0 self.image_width = 0 self.strip_offsets = 0 self._shape = () self.shape = () - self.axes = '' + self.axes = "" if self.is_palette: - self.dtype = self.tags['color_map'].dtype[1] + self.dtype = self.tags["color_map"].dtype[1] self.color_map = numpy.array(self.color_map, self.dtype) dmax = self.color_map.max() if dmax < 256: self.dtype = numpy.uint8 self.color_map = self.color_map.astype(self.dtype) - #else: + # else: # self.dtype = numpy.uint8 # self.color_map >>= 8 # self.color_map = self.color_map.astype(self.dtype) @@ -1203,65 +1359,99 @@ def _process_tags(self): if self.is_stk: # consolidate mm_uci tags - planes = tags['mm_uic2'].count - self.mm_uic_tags = Record(tags['mm_uic2'].value) - for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'): + planes = tags["mm_uic2"].count + self.mm_uic_tags = Record(tags["mm_uic2"].value) + for key in ("mm_uic3", "mm_uic4", "mm_uic1"): if key in tags: self.mm_uic_tags.update(tags[key].value) - if self.planar_configuration == 'contig': - self._shape = (planes, 1, self.image_length, self.image_width, - self.samples_per_pixel) + if self.planar_configuration == "contig": + self._shape = ( + planes, + 1, + self.image_length, + self.image_width, + self.samples_per_pixel, + ) self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4)) - self.axes = 'PYXS' + self.axes = "PYXS" else: - self._shape = (planes, self.samples_per_pixel, - self.image_length, self.image_width, 1) + self._shape = ( + planes, + self.samples_per_pixel, + 
self.image_length, + self.image_width, + 1, + ) self.shape = self._shape[:4] - self.axes = 'PSYX' - if self.is_palette and (self.color_map.shape[1] - >= 2**self.bits_per_sample): + self.axes = "PSYX" + if self.is_palette and ( + self.color_map.shape[1] >= 2**self.bits_per_sample + ): self.shape = (3, planes, self.image_length, self.image_width) - self.axes = 'CPYX' + self.axes = "CPYX" else: warnings.warn("palette cannot be applied") self.is_palette = False elif self.is_palette: samples = 1 - if 'extra_samples' in self.tags: + if "extra_samples" in self.tags: samples += len(self.extra_samples) - if self.planar_configuration == 'contig': + if self.planar_configuration == "contig": self._shape = ( - 1, 1, self.image_length, self.image_width, samples) + 1, + 1, + self.image_length, + self.image_width, + samples, + ) else: self._shape = ( - 1, samples, self.image_length, self.image_width, 1) + 1, + samples, + self.image_length, + self.image_width, + 1, + ) if self.color_map.shape[1] >= 2**self.bits_per_sample: self.shape = (3, self.image_length, self.image_width) - self.axes = 'CYX' + self.axes = "CYX" else: warnings.warn("palette cannot be applied") self.is_palette = False self.shape = (self.image_length, self.image_width) - self.axes = 'YX' + self.axes = "YX" elif self.is_rgb or self.samples_per_pixel > 1: - if self.planar_configuration == 'contig': - self._shape = (1, 1, self.image_length, self.image_width, - self.samples_per_pixel) - self.shape = (self.image_length, self.image_width, - self.samples_per_pixel) - self.axes = 'YXS' + if self.planar_configuration == "contig": + self._shape = ( + 1, + 1, + self.image_length, + self.image_width, + self.samples_per_pixel, + ) + self.shape = ( + self.image_length, + self.image_width, + self.samples_per_pixel, + ) + self.axes = "YXS" else: - self._shape = (1, self.samples_per_pixel, self.image_length, - self.image_width, 1) + self._shape = ( + 1, + self.samples_per_pixel, + self.image_length, + self.image_width, + 1, + ) 
self.shape = self._shape[1:-1] - self.axes = 'SYX' - if self.is_rgb and 'extra_samples' in self.tags: + self.axes = "SYX" + if self.is_rgb and "extra_samples" in self.tags: extra_samples = self.extra_samples - if self.tags['extra_samples'].count == 1: - extra_samples = (extra_samples, ) + if self.tags["extra_samples"].count == 1: + extra_samples = (extra_samples,) for exs in extra_samples: - if exs in ('unassalpha', 'assocalpha', 'unspecified'): - if self.planar_configuration == 'contig': + if exs in ("unassalpha", "assocalpha", "unspecified"): + if self.planar_configuration == "contig": self.shape = self.shape[:2] + (4,) else: self.shape = (4,) + self.shape[1:] @@ -1269,14 +1459,16 @@ def _process_tags(self): else: self._shape = (1, 1, self.image_length, self.image_width, 1) self.shape = self._shape[2:4] - self.axes = 'YX' + self.axes = "YX" - if not self.compression and not 'strip_byte_counts' in tags: + if not self.compression and not "strip_byte_counts" in tags: self.strip_byte_counts = numpy.prod(self.shape) * ( - self.bits_per_sample // 8) + self.bits_per_sample // 8 + ) - def asarray(self, squeeze=True, colormapped=True, rgbonly=True, - memmap=False): + def asarray( + self, squeeze=True, colormapped=True, rgbonly=True, memmap=False + ): """Read image data from file and return as numpy array. Raise ValueError if format is unsupported. 
@@ -1300,15 +1492,18 @@ def asarray(self, squeeze=True, colormapped=True, rgbonly=True, if not fh: raise IOError("TIFF file is not open") if self.dtype is None: - raise ValueError("data type not supported: %s%i" % ( - self.sample_format, self.bits_per_sample)) + raise ValueError( + "data type not supported: %s%i" + % (self.sample_format, self.bits_per_sample) + ) if self.compression not in TIFF_DECOMPESSORS: raise ValueError("cannot decompress %s" % self.compression) - if ('ycbcr_subsampling' in self.tags - and self.tags['ycbcr_subsampling'].value not in (1, (1, 1))): + if "ycbcr_subsampling" in self.tags and self.tags[ + "ycbcr_subsampling" + ].value not in (1, (1, 1)): raise ValueError("YCbCr subsampling not supported") - tag = self.tags['sample_format'] - if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): + tag = self.tags["sample_format"] + if tag.count != 1 and any((i - tag.value[0] for i in tag.value)): raise ValueError("sample formats don't match %s" % str(tag.value)) dtype = self._dtype @@ -1321,11 +1516,12 @@ def asarray(self, squeeze=True, colormapped=True, rgbonly=True, image_length = self.image_length typecode = self.parent.byteorder + dtype bits_per_sample = self.bits_per_sample - byteorder_is_native = ({'big': '>', 'little': '<'}[sys.byteorder] == - self.parent.byteorder) + byteorder_is_native = {"big": ">", "little": "<"}[ + sys.byteorder + ] == self.parent.byteorder if self.is_tiled: - if 'tile_offsets' in self.tags: + if "tile_offsets" in self.tags: byte_counts = self.tile_byte_counts offsets = self.tile_offsets else: @@ -1335,7 +1531,7 @@ def asarray(self, squeeze=True, colormapped=True, rgbonly=True, tile_length = self.tile_length tw = (image_width + tile_width - 1) // tile_width tl = (image_length + tile_length - 1) // tile_length - shape = shape[:-3] + (tl*tile_length, tw*tile_width, shape[-1]) + shape = shape[:-3] + (tl * tile_length, tw * tile_width, shape[-1]) tile_shape = (tile_length, tile_width, shape[-1]) runlen = tile_width 
else: @@ -1346,27 +1542,37 @@ def asarray(self, squeeze=True, colormapped=True, rgbonly=True, try: offsets[0] except TypeError: - offsets = (offsets, ) - byte_counts = (byte_counts, ) + offsets = (offsets,) + byte_counts = (byte_counts,) if any(o < 2 for o in offsets): raise ValueError("corrupted page") - if (not self.is_tiled and (self.is_stk or (not self.compression - and bits_per_sample in (8, 16, 32, 64) - and all(offsets[i] == offsets[i+1] - byte_counts[i] - for i in range(len(offsets)-1))))): + if not self.is_tiled and ( + self.is_stk + or ( + not self.compression + and bits_per_sample in (8, 16, 32, 64) + and all( + offsets[i] == offsets[i + 1] - byte_counts[i] + for i in range(len(offsets) - 1) + ) + ) + ): # contiguous data - if (memmap and not (self.is_tiled or self.predictor or - ('extra_samples' in self.tags) or - (colormapped and self.is_palette) or - (not byteorder_is_native))): - result = numpy.memmap(fh, typecode, 'r', offsets[0], shape) + if memmap and not ( + self.is_tiled + or self.predictor + or ("extra_samples" in self.tags) + or (colormapped and self.is_palette) + or (not byteorder_is_native) + ): + result = numpy.memmap(fh, typecode, "r", offsets[0], shape) else: fh.seek(offsets[0]) result = numpy_fromfile(fh, typecode, numpy.prod(shape)) - result = result.astype('=' + dtype) + result = result.astype("=" + dtype) else: - if self.planar_configuration == 'contig': + if self.planar_configuration == "contig": runlen *= self.samples_per_pixel if bits_per_sample in (8, 16, 32, 64, 128): if (bits_per_sample * runlen) % 8: @@ -1374,12 +1580,17 @@ def asarray(self, squeeze=True, colormapped=True, rgbonly=True, def unpack(x): return numpy.fromstring(x, typecode) + elif isinstance(bits_per_sample, tuple): + def unpack(x): return unpackrgb(x, typecode, bits_per_sample) + else: + def unpack(x): return unpackints(x, typecode, bits_per_sample, runlen) + decompress = TIFF_DECOMPESSORS[self.compression] if self.is_tiled: result = numpy.empty(shape, dtype) @@ 
-1388,10 +1599,11 @@ def unpack(x): fh.seek(offset) tile = unpack(decompress(fh.read(bytecount))) tile.shape = tile_shape - if self.predictor == 'horizontal': + if self.predictor == "horizontal": numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) - result[0, pl, tl:tl+tile_length, - tw:tw+tile_width, :] = tile + result[ + 0, pl, tl : tl + tile_length, tw : tw + tile_width, : + ] = tile del tile tw += tile_width if tw >= shape[-2]: @@ -1400,23 +1612,30 @@ def unpack(x): tl, pl = 0, pl + 1 result = result[..., :image_length, :image_width, :] else: - strip_size = (self.rows_per_strip * self.image_width * - self.samples_per_pixel) + strip_size = ( + self.rows_per_strip + * self.image_width + * self.samples_per_pixel + ) result = numpy.empty(shape, dtype).reshape(-1) index = 0 for offset, bytecount in zip(offsets, byte_counts): fh.seek(offset) strip = fh.read(bytecount) strip = unpack(decompress(strip)) - size = min(result.size, strip.size, strip_size, - result.size - index) - result[index:index+size] = strip[:size] + size = min( + result.size, + strip.size, + strip_size, + result.size - index, + ) + result[index : index + size] = strip[:size] del strip index += size result.shape = self._shape - if self.predictor == 'horizontal' and not self.is_tiled: + if self.predictor == "horizontal" and not self.is_tiled: # work around bug in LSM510 software if not (self.parent.is_lsm and not self.compression): numpy.cumsum(result, axis=-2, dtype=dtype, out=result) @@ -1424,22 +1643,23 @@ def unpack(x): if colormapped and self.is_palette: if self.color_map.shape[1] >= 2**bits_per_sample: # FluoView and LSM might fail here - result = numpy.take(self.color_map, - result[:, 0, :, :, 0], axis=1) - elif rgbonly and self.is_rgb and 'extra_samples' in self.tags: + result = numpy.take( + self.color_map, result[:, 0, :, :, 0], axis=1 + ) + elif rgbonly and self.is_rgb and "extra_samples" in self.tags: # return only RGB and first alpha channel if exists extra_samples = self.extra_samples - if 
self.tags['extra_samples'].count == 1: - extra_samples = (extra_samples, ) + if self.tags["extra_samples"].count == 1: + extra_samples = (extra_samples,) for i, exs in enumerate(extra_samples): - if exs in ('unassalpha', 'assocalpha', 'unspecified'): - if self.planar_configuration == 'contig': - result = result[..., [0, 1, 2, 3+i]] + if exs in ("unassalpha", "assocalpha", "unspecified"): + if self.planar_configuration == "contig": + result = result[..., [0, 1, 2, 3 + i]] else: - result = result[:, [0, 1, 2, 3+i]] + result = result[:, [0, 1, 2, 3 + i]] break else: - if self.planar_configuration == 'contig': + if self.planar_configuration == "contig": result = result[..., :3] else: result = result[:, :3] @@ -1448,23 +1668,43 @@ def unpack(x): try: result.shape = self.shape except ValueError: - warnings.warn("failed to reshape from %s to %s" % ( - str(result.shape), str(self.shape))) + warnings.warn( + "failed to reshape from %s to %s" + % (str(result.shape), str(self.shape)) + ) return result def __str__(self): """Return string containing information about page.""" - s = ', '.join(s for s in ( - ' x '.join(str(i) for i in self.shape), - str(numpy.dtype(self.dtype)), - '%s bit' % str(self.bits_per_sample), - self.photometric if 'photometric' in self.tags else '', - self.compression if self.compression else 'raw', - '|'.join(t[3:] for t in ( - 'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej', - 'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy', - 'is_reduced', 'is_tiled') if getattr(self, t))) if s) + s = ", ".join( + s + for s in ( + " x ".join(str(i) for i in self.shape), + str(numpy.dtype(self.dtype)), + "%s bit" % str(self.bits_per_sample), + self.photometric if "photometric" in self.tags else "", + self.compression if self.compression else "raw", + "|".join( + t[3:] + for t in ( + "is_stk", + "is_lsm", + "is_nih", + "is_ome", + "is_imagej", + "is_micromanager", + "is_fluoview", + "is_mdgel", + "is_mediacy", + "is_reduced", + "is_tiled", + ) + if 
getattr(self, t) + ), + ) + if s + ) return "Page %i: %s" % (self.index, s) def __getattr__(self, name): @@ -1478,81 +1718,88 @@ def __getattr__(self, name): @lazyattr def is_rgb(self): """True if page contains a RGB image.""" - return ('photometric' in self.tags and - self.tags['photometric'].value == 2) + return ( + "photometric" in self.tags and self.tags["photometric"].value == 2 + ) @lazyattr def is_palette(self): """True if page contains a palette-colored image.""" - return ('photometric' in self.tags and - self.tags['photometric'].value == 3) + return ( + "photometric" in self.tags and self.tags["photometric"].value == 3 + ) @lazyattr def is_tiled(self): """True if page contains tiled image.""" - return 'tile_width' in self.tags + return "tile_width" in self.tags @lazyattr def is_reduced(self): """True if page is a reduced image of another image.""" - return bool(self.tags['new_subfile_type'].value & 1) + return bool(self.tags["new_subfile_type"].value & 1) @lazyattr def is_mdgel(self): """True if page contains md_file_tag tag.""" - return 'md_file_tag' in self.tags + return "md_file_tag" in self.tags @lazyattr def is_mediacy(self): """True if page contains Media Cybernetics Id tag.""" - return ('mc_id' in self.tags and - self.tags['mc_id'].value.startswith(b'MC TIFF')) + return "mc_id" in self.tags and self.tags["mc_id"].value.startswith( + b"MC TIFF" + ) @lazyattr def is_stk(self): """True if page contains MM_UIC2 tag.""" - return 'mm_uic2' in self.tags + return "mm_uic2" in self.tags @lazyattr def is_lsm(self): """True if page contains LSM CZ_LSM_INFO tag.""" - return 'cz_lsm_info' in self.tags + return "cz_lsm_info" in self.tags @lazyattr def is_fluoview(self): """True if page contains FluoView MM_STAMP tag.""" - return 'mm_stamp' in self.tags + return "mm_stamp" in self.tags @lazyattr def is_nih(self): """True if page contains NIH image header.""" - return 'nih_image_header' in self.tags + return "nih_image_header" in self.tags @lazyattr def 
is_ome(self): """True if page contains OME-XML in image_description tag.""" - return ('image_description' in self.tags and self.tags[ - 'image_description'].value.startswith(b' parent.offset_size or code in CUSTOM_TAGS: pos = fh.tell() - tof = {4: 'I', 8: 'Q'}[parent.offset_size] - self.value_offset = offset = struct.unpack(byteorder+tof, value)[0] + tof = {4: "I", 8: "Q"}[parent.offset_size] + self.value_offset = offset = struct.unpack(byteorder + tof, value)[ + 0 + ] if offset < 0 or offset > parent._fsize: raise TiffTag.Error("corrupt file - invalid tag value offset") elif offset < 4: @@ -1640,7 +1898,7 @@ def _fromfile(self, parent): fh.seek(0, 2) # bug in numpy/Python 3.x ? if isinstance(value, dict): # numpy.core.records.record value = Record(value) - elif code in TIFF_TAGS or dtype[-1] == 's': + elif code in TIFF_TAGS or dtype[-1] == "s": value = struct.unpack(fmt, fh.read(size)) else: value = read_numpy(fh, byteorder, dtype, count) @@ -1653,7 +1911,7 @@ def _fromfile(self, parent): if len(value) == 1: value = value[0] - if dtype.endswith('s') and isinstance(value, bytes): + if dtype.endswith("s") and isinstance(value, bytes): value = stripnull(value) self.code = code @@ -1664,7 +1922,7 @@ def _fromfile(self, parent): def __str__(self): """Return string containing information about tag.""" - return ' '.join(str(getattr(self, s)) for s in self.__slots__) + return " ".join(str(getattr(self, s)) for s in self.__slots__) class TiffSequence(object): @@ -1687,6 +1945,7 @@ class TiffSequence(object): (2, 100, 256, 256) """ + _axes_pattern = """ # matches Olympus OIF and Leica TIFF series _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) @@ -1701,7 +1960,7 @@ class TiffSequence(object): class _ParseError(Exception): pass - def __init__(self, files, imread=TiffFile, pattern='axes'): + def __init__(self, files, imread=TiffFile, pattern="axes"): """Initialize instance from multiple files. 
Parameters @@ -1721,11 +1980,11 @@ def __init__(self, files, imread=TiffFile, pattern='axes'): files = list(files) if not files: raise ValueError("no files found") - #if not os.path.isfile(files[0]): + # if not os.path.isfile(files[0]): # raise ValueError("file not found") self.files = files - if hasattr(imread, 'asarray'): + if hasattr(imread, "asarray"): _imread = imread def imread(fname, *args, **kwargs): @@ -1734,24 +1993,27 @@ def imread(fname, *args, **kwargs): self.imread = imread - self.pattern = self._axes_pattern if pattern == 'axes' else pattern + self.pattern = self._axes_pattern if pattern == "axes" else pattern try: self._parse() if not self.axes: - self.axes = 'I' + self.axes = "I" except self._ParseError: - self.axes = 'I' + self.axes = "I" self.shape = (len(files),) self._start_index = (0,) self._indices = ((i,) for i in range(len(files))) def __str__(self): """Return string with information about image sequence.""" - return "\n".join([ - self.files[0], - '* files: %i' % len(self.files), - '* axes: %s' % self.axes, - '* shape: %s' % str(self.shape)]) + return "\n".join( + [ + self.files[0], + "* files: %i" % len(self.files), + "* axes: %s" % self.axes, + "* shape: %s" % str(self.shape), + ] + ) def __len__(self): return len(self.files) @@ -1776,7 +2038,7 @@ def asarray(self, *args, **kwargs): result = numpy.zeros(result_shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): - index = [i-j for i, j in zip(index, self._start_index)] + index = [i - j for i, j in zip(index, self._start_index)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, *args, **kwargs) result[index] = im @@ -1794,19 +2056,19 @@ def _parse(self): matches = matches[-1] if len(matches) % 2: raise self._ParseError("pattern doesn't match axis name and index") - axes = ''.join(m for m in matches[::2] if m) + axes = "".join(m for m in matches[::2] if m) if not axes: raise self._ParseError("pattern 
doesn't match file names") indices = [] for fname in self.files: matches = pattern.findall(fname)[-1] - if axes != ''.join(m for m in matches[::2] if m): + if axes != "".join(m for m in matches[::2] if m): raise ValueError("axes don't match within the image sequence") indices.append([int(m) for m in matches[1::2] if m]) shape = tuple(numpy.max(indices, axis=0)) start_index = tuple(numpy.min(indices, axis=0)) - shape = tuple(i-j+1 for i, j in zip(shape, start_index)) + shape = tuple(i - j + 1 for i, j in zip(shape, start_index)) if numpy.prod(shape) != len(self.files): warnings.warn("files are missing. Missing data are zeroed") @@ -1822,6 +2084,7 @@ class Record(dict): Can also be initialized with numpy.core.records.record. """ + __slots__ = () def __init__(self, arg=None, **kwargs): @@ -1834,7 +2097,7 @@ def __init__(self, arg=None, **kwargs): except (TypeError, ValueError): for i, name in enumerate(arg.dtype.names): v = arg[i] - self[name] = v if v.dtype.char != 'S' else stripnull(v) + self[name] = v if v.dtype.char != "S" else stripnull(v) def __getattr__(self, name): return self[name] @@ -1847,7 +2110,7 @@ def __str__(self): s = [] lists = [] for k in sorted(self): - if k.startswith('_'): # does not work with byte + if k.startswith("_"): # does not work with byte continue v = self[k] if isinstance(v, (list, tuple)) and len(v): @@ -1857,43 +2120,51 @@ def __str__(self): elif isinstance(v[0], TiffPage): v = [i.index for i in v if i] s.append( - ("* %s: %s" % (k, str(v))).split("\n", 1)[0] - [:PRINT_LINE_LEN].rstrip()) + ("* %s: %s" % (k, str(v))) + .split("\n", 1)[0][:PRINT_LINE_LEN] + .rstrip() + ) for k, v in lists: l = [] for i, w in enumerate(v): - l.append("* %s[%i]\n %s" % (k, i, - str(w).replace("\n", "\n "))) - s.append('\n'.join(l)) - return '\n'.join(s) + l.append( + "* %s[%i]\n %s" % (k, i, str(w).replace("\n", "\n ")) + ) + s.append("\n".join(l)) + return "\n".join(s) class TiffTags(Record): """Dictionary of TiffTags with attribute access.""" + def 
__str__(self): """Return string with information about all tags.""" s = [] for tag in sorted(self.values(), key=lambda x: x.code): typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1]) - line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode, - str(tag.value).split('\n', 1)[0]) + line = "* %i %s (%s) %s" % ( + tag.code, + tag.name, + typecode, + str(tag.value).split("\n", 1)[0], + ) s.append(line[:PRINT_LINE_LEN].lstrip()) - return '\n'.join(s) + return "\n".join(s) def read_bytes(fh, byteorder, dtype, count): """Read tag data from file and return as byte string.""" - return numpy_fromfile(fh, byteorder+dtype[-1], count).tostring() + return numpy_fromfile(fh, byteorder + dtype[-1], count).tostring() def read_numpy(fh, byteorder, dtype, count): """Read tag data from file and return as numpy array.""" - return numpy_fromfile(fh, byteorder+dtype[-1], count) + return numpy_fromfile(fh, byteorder + dtype[-1], count) def read_json(fh, byteorder, dtype, count): """Read tag data from file and return as object.""" - return json.loads(unicode(stripnull(fh.read(count)), 'utf-8')) + return json.loads(unicode(stripnull(fh.read(count)), "utf-8")) def read_mm_header(fh, byteorder, dtype, count): @@ -1903,65 +2174,65 @@ def read_mm_header(fh, byteorder, dtype, count): def read_mm_stamp(fh, byteorder, dtype, count): """Read MM_STAMP tag from file and return as numpy.array.""" - return numpy_fromfile(fh, byteorder+'8f8', 1)[0] + return numpy_fromfile(fh, byteorder + "8f8", 1)[0] def read_mm_uic1(fh, byteorder, dtype, count): """Read MM_UIC1 tag from file and return as dictionary.""" - t = fh.read(8*count) - t = struct.unpack('%s%iI' % (byteorder, 2*count), t) - return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) - if k in MM_TAG_IDS) + t = fh.read(8 * count) + t = struct.unpack("%s%iI" % (byteorder, 2 * count), t) + return dict( + (MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) if k in MM_TAG_IDS + ) def read_mm_uic2(fh, byteorder, dtype, count): 
"""Read MM_UIC2 tag from file and return as dictionary.""" - result = {'number_planes': count} - values = numpy_fromfile(fh, byteorder+'I', 6*count) - result['z_distance'] = values[0::6] // values[1::6] - #result['date_created'] = tuple(values[2::6]) - #result['time_created'] = tuple(values[3::6]) - #result['date_modified'] = tuple(values[4::6]) - #result['time_modified'] = tuple(values[5::6]) + result = {"number_planes": count} + values = numpy_fromfile(fh, byteorder + "I", 6 * count) + result["z_distance"] = values[0::6] // values[1::6] + # result['date_created'] = tuple(values[2::6]) + # result['time_created'] = tuple(values[3::6]) + # result['date_modified'] = tuple(values[4::6]) + # result['time_modified'] = tuple(values[5::6]) return result def read_mm_uic3(fh, byteorder, dtype, count): """Read MM_UIC3 tag from file and return as dictionary.""" - t = numpy_fromfile(fh, byteorder+'I', 2*count) - return {'wavelengths': t[0::2] // t[1::2]} + t = numpy_fromfile(fh, byteorder + "I", 2 * count) + return {"wavelengths": t[0::2] // t[1::2]} def read_mm_uic4(fh, byteorder, dtype, count): """Read MM_UIC4 tag from file and return as dictionary.""" - t = struct.unpack(byteorder + 'hI'*count, fh.read(6*count)) - return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) - if k in MM_TAG_IDS) + t = struct.unpack(byteorder + "hI" * count, fh.read(6 * count)) + return dict( + (MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) if k in MM_TAG_IDS + ) def read_cz_lsm_info(fh, byteorder, dtype, count): """Read CS_LSM_INFO tag from file and return as numpy.rec.array.""" - result = numpy.rec.fromfile(fh, CZ_LSM_INFO, 1, - byteorder=byteorder)[0] - {50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation + result = numpy.rec.fromfile(fh, CZ_LSM_INFO, 1, byteorder=byteorder)[0] + {50350412: "1.3", 67127628: "2.0"}[result.magic_number] # validation return result def read_cz_lsm_time_stamps(fh, byteorder): """Read LSM time stamps from file and return as list.""" - 
size, count = struct.unpack(byteorder+'II', fh.read(8)) + size, count = struct.unpack(byteorder + "II", fh.read(8)) if size != (8 + 8 * count): raise ValueError("lsm_time_stamps block is too short") - return struct.unpack(('%s%dd' % (byteorder, count)), - fh.read(8*count)) + return struct.unpack(("%s%dd" % (byteorder, count)), fh.read(8 * count)) def read_cz_lsm_event_list(fh, byteorder): """Read LSM events from file and return as list of (time, type, text).""" - count = struct.unpack(byteorder+'II', fh.read(8))[1] + count = struct.unpack(byteorder + "II", fh.read(8))[1] events = [] while count > 0: - esize, etime, etype = struct.unpack(byteorder+'IdI', fh.read(16)) + esize, etime, etype = struct.unpack(byteorder + "IdI", fh.read(16)) etext = stripnull(fh.read(esize - 16)) events.append((etime, etype, etext)) count -= 1 @@ -1973,17 +2244,17 @@ def read_cz_lsm_scan_info(fh, byteorder): block = Record() blocks = [block] unpack = struct.unpack - if 0x10000000 != struct.unpack(byteorder+"I", fh.read(4))[0]: + if 0x10000000 != struct.unpack(byteorder + "I", fh.read(4))[0]: raise ValueError("not a lsm_scan_info structure") fh.read(8) while True: - entry, dtype, size = unpack(byteorder+"III", fh.read(12)) + entry, dtype, size = unpack(byteorder + "III", fh.read(12)) if dtype == 2: value = stripnull(fh.read(size)) elif dtype == 4: - value = unpack(byteorder+"i", fh.read(4))[0] + value = unpack(byteorder + "i", fh.read(4))[0] elif dtype == 5: - value = unpack(byteorder+"d", fh.read(8))[0] + value = unpack(byteorder + "d", fh.read(8))[0] else: value = 0 if entry in CZ_LSM_SCAN_INFO_ARRAYS: @@ -2000,7 +2271,7 @@ def read_cz_lsm_scan_info(fh, byteorder): elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES: name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry] setattr(block, name, value) - elif entry == 0xffffffff: + elif entry == 0xFFFFFFFF: block = blocks.pop() else: setattr(block, "unknown_%x" % entry, value) @@ -2013,40 +2284,42 @@ def read_nih_image_header(fh, byteorder, dtype, count): """Read 
NIH_IMAGE_HEADER tag from file and return as numpy.rec.array.""" a = numpy.rec.fromfile(fh, NIH_IMAGE_HEADER, 1, byteorder=byteorder)[0] a = a.newbyteorder(byteorder) - a.xunit = a.xunit[:a._xunit_len] - a.um = a.um[:a._um_len] + a.xunit = a.xunit[: a._xunit_len] + a.um = a.um[: a._um_len] return a def imagej_metadata(data, bytecounts, byteorder): """Return dict from ImageJ meta data tag value.""" - _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') + _str = str if sys.version_info[0] < 3 else lambda x: str(x, "cp1252") def read_string(data, byteorder): - return _str(stripnull(data[0 if byteorder == '<' else 1::2])) + return _str(stripnull(data[0 if byteorder == "<" else 1 :: 2])) def read_double(data, byteorder): - return struct.unpack(byteorder+('d' * (len(data) // 8)), data) + return struct.unpack(byteorder + ("d" * (len(data) // 8)), data) def read_bytes(data, byteorder): - #return struct.unpack('b' * len(data), data) - return numpy.fromstring(data, 'uint8') + # return struct.unpack('b' * len(data), data) + return numpy.fromstring(data, "uint8") metadata_types = { # big endian - b'info': ('info', read_string), - b'labl': ('labels', read_string), - b'rang': ('ranges', read_double), - b'luts': ('luts', read_bytes), - b'roi ': ('roi', read_bytes), - b'over': ('overlays', read_bytes)} + b"info": ("info", read_string), + b"labl": ("labels", read_string), + b"rang": ("ranges", read_double), + b"luts": ("luts", read_bytes), + b"roi ": ("roi", read_bytes), + b"over": ("overlays", read_bytes), + } metadata_types.update( # little endian - dict((k[::-1], v) for k, v in metadata_types.items())) + dict((k[::-1], v) for k, v in metadata_types.items()) + ) if not bytecounts: raise ValueError("no ImageJ meta data") - if not data[:4] in (b'IJIJ', b'JIJI'): + if not data[:4] in (b"IJIJ", b"JIJI"): raise ValueError("invalid ImageJ meta data") header_size = bytecounts[0] @@ -2054,7 +2327,9 @@ def read_bytes(data, byteorder): raise ValueError("invalid ImageJ meta 
data header size") ntypes = (header_size - 4) // 8 - header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8]) + header = struct.unpack( + byteorder + "4sI" * ntypes, data[4 : 4 + ntypes * 8] + ) pos = 4 + ntypes * 8 counter = 0 result = {} @@ -2072,14 +2347,15 @@ def read_bytes(data, byteorder): def imagej_description(description): """Return dict from ImageJ image_description tag.""" + def _bool(val): - return {b'true': True, b'false': False}[val.lower()] + return {b"true": True, b"false": False}[val.lower()] - _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252') + _str = str if sys.version_info[0] < 3 else lambda x: str(x, "cp1252") result = {} for line in description.splitlines(): try: - key, val = line.split(b'=') + key, val = line.split(b"=") except Exception: continue key = key.strip() @@ -2104,19 +2380,26 @@ def read_micromanager_metadata(fh): """ fh.seek(0) try: - byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] + byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)] except IndexError: raise ValueError("not a MicroManager TIFF file") results = {} fh.seek(8) - (index_header, index_offset, display_header, display_offset, - comments_header, comments_offset, summary_header, summary_length - ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) + ( + index_header, + index_offset, + display_header, + display_offset, + comments_header, + comments_offset, + summary_header, + summary_length, + ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) if summary_header != 2355492: raise ValueError("invalid MicroManager summary_header") - results['summary'] = read_json(fh, byteorder, None, summary_length) + results["summary"] = read_json(fh, byteorder, None, summary_length) if index_header != 54773648: raise ValueError("invalid MicroManager index_header") @@ -2124,10 +2407,14 @@ def read_micromanager_metadata(fh): header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 3453623: raise ValueError("invalid MicroManager index_header") - 
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count)) - results['index_map'] = { - 'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5], - 'position': data[3::5], 'offset': data[4::5]} + data = struct.unpack(byteorder + "IIIII" * count, fh.read(20 * count)) + results["index_map"] = { + "channel": data[::5], + "slice": data[1::5], + "frame": data[2::5], + "position": data[3::5], + "offset": data[4::5], + } if display_header != 483765892: raise ValueError("invalid MicroManager display_header") @@ -2135,7 +2422,7 @@ def read_micromanager_metadata(fh): header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 347834724: raise ValueError("invalid MicroManager display_header") - results['display_settings'] = read_json(fh, byteorder, None, count) + results["display_settings"] = read_json(fh, byteorder, None, count) if comments_header != 99384722: raise ValueError("invalid MicroManager comments_header") @@ -2143,7 +2430,7 @@ def read_micromanager_metadata(fh): header, count = struct.unpack(byteorder + "II", fh.read(8)) if header != 84720485: raise ValueError("invalid MicroManager comments_header") - results['comments'] = read_json(fh, byteorder, None, count) + results["comments"] = read_json(fh, byteorder, None, count) return results @@ -2153,18 +2440,18 @@ def _replace_by(module_function, package=None, warn=True): try: from importlib import import_module except ImportError: - warnings.warn('Could not import module importlib') + warnings.warn("Could not import module importlib") return lambda func: func def decorate(func, module_function=module_function, warn=warn): try: - module, function = module_function.split('.') + module, function = module_function.split(".") if not package: module = import_module(module) else: - module = import_module('.' + module, package=package) + module = import_module("." 
+ module, package=package) func, oldfunc = getattr(module, function), func - globals()['__old_' + func.__name__] = oldfunc + globals()["__old_" + func.__name__] = oldfunc except Exception: if warn: warnings.warn("failed to import %s" % module_function) @@ -2173,14 +2460,14 @@ def decorate(func, module_function=module_function, warn=warn): return decorate -@_replace_by('_tifffile.decodepackbits') +@_replace_by("_tifffile.decodepackbits") def decodepackbits(encoded): """Decompress PackBits encoded byte string. PackBits is a simple byte-oriented run-length compression scheme. """ - func = ord if sys.version[0] == '2' else lambda x: x + func = ord if sys.version[0] == "2" else lambda x: x result = [] result_extend = result.extend i = 0 @@ -2189,17 +2476,17 @@ def decodepackbits(encoded): n = func(encoded[i]) + 1 i += 1 if n < 129: - result_extend(encoded[i:i+n]) + result_extend(encoded[i : i + n]) i += n elif n > 129: - result_extend(encoded[i:i+1] * (258-n)) + result_extend(encoded[i : i + 1] * (258 - n)) i += 1 except IndexError: pass - return b''.join(result) if sys.version[0] == '2' else bytes(result) + return b"".join(result) if sys.version[0] == "2" else bytes(result) -@_replace_by('_tifffile.decodelzw') +@_replace_by("_tifffile.decodelzw") def decodelzw(encoded): """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). 
@@ -2213,7 +2500,7 @@ def decodelzw(encoded): bitcount_max = len_encoded * 8 unpack = struct.unpack - if sys.version[0] == '2': + if sys.version[0] == "2": newtable = [chr(i) for i in range(256)] else: newtable = [bytes([i]) for i in range(256)] @@ -2222,20 +2509,21 @@ def decodelzw(encoded): def next_code(): """Return integer of `bitw` bits at `bitcount` position in encoded.""" start = bitcount // 8 - s = encoded[start:start+4] + s = encoded[start : start + 4] try: - code = unpack('>I', s)[0] + code = unpack(">I", s)[0] except Exception: - code = unpack('>I', s + b'\x00'*(4-len(s)))[0] + code = unpack(">I", s + b"\x00" * (4 - len(s)))[0] code <<= bitcount % 8 code &= mask return code >> shr switchbitch = { # code: bit-width, shr-bits, bit-mask - 255: (9, 23, int(9*'1'+'0'*23, 2)), - 511: (10, 22, int(10*'1'+'0'*22, 2)), - 1023: (11, 21, int(11*'1'+'0'*21, 2)), - 2047: (12, 20, int(12*'1'+'0'*20, 2)), } + 255: (9, 23, int(9 * "1" + "0" * 23, 2)), + 511: (10, 22, int(10 * "1" + "0" * 22, 2)), + 1023: (11, 21, int(11 * "1" + "0" * 21, 2)), + 2047: (12, 20, int(12 * "1" + "0" * 20, 2)), + } bitw, shr, mask = switchbitch[255] bitcount = 0 @@ -2281,12 +2569,13 @@ def next_code(): if code != 257: warnings.warn( - "decodelzw encountered unexpected end of stream (code %i)" % code) + "decodelzw encountered unexpected end of stream (code %i)" % code + ) - return b''.join(result) + return b"".join(result) -@_replace_by('_tifffile.unpackints') +@_replace_by("_tifffile.unpackints") def unpackints(data, dtype, itemsize, runlen=0): """Decompress byte string to array of integers of any bit size <= 32. 
@@ -2303,7 +2592,7 @@ def unpackints(data, dtype, itemsize, runlen=0): """ if itemsize == 1: # bitarray - data = numpy.fromstring(data, '|B') + data = numpy.fromstring(data, "|B") data = numpy.unpackbits(data) if runlen % 8: data = data.reshape(-1, runlen + (8 - runlen % 8)) @@ -2323,34 +2612,34 @@ def unpackints(data, dtype, itemsize, runlen=0): raise ValueError("dtype.itemsize too small") if runlen == 0: runlen = len(data) // itembytes - skipbits = runlen*itemsize % 8 + skipbits = runlen * itemsize % 8 if skipbits: skipbits = 8 - skipbits - shrbits = itembytes*8 - itemsize - bitmask = int(itemsize*'1'+'0'*shrbits, 2) - dtypestr = '>' + dtype.char # dtype always big endian? + shrbits = itembytes * 8 - itemsize + bitmask = int(itemsize * "1" + "0" * shrbits, 2) + dtypestr = ">" + dtype.char # dtype always big endian? unpack = struct.unpack - l = runlen * (len(data)*8 // (runlen*itemsize + skipbits)) - result = numpy.empty((l, ), dtype) + l = runlen * (len(data) * 8 // (runlen * itemsize + skipbits)) + result = numpy.empty((l,), dtype) bitcount = 0 for i in range(len(result)): start = bitcount // 8 - s = data[start:start+itembytes] + s = data[start : start + itembytes] try: code = unpack(dtypestr, s)[0] except Exception: - code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0] + code = unpack(dtypestr, s + b"\x00" * (itembytes - len(s)))[0] code <<= bitcount % 8 code &= bitmask result[i] = code >> shrbits bitcount += itemsize - if (i+1) % runlen == 0: + if (i + 1) % runlen == 0: bitcount += skipbits return result -def unpackrgb(data, dtype='= bits) - data = numpy.fromstring(data, dtype.byteorder+dt) + dt = next(i for i in "BHI" if numpy.dtype(i).itemsize * 8 >= bits) + data = numpy.fromstring(data, dtype.byteorder + dt) result = numpy.empty((data.size, len(bitspersample)), dtype.char) for i, bps in enumerate(bitspersample): - t = data >> int(numpy.sum(bitspersample[i+1:])) - t &= int('0b'+'1'*bps, 2) + t = data >> int(numpy.sum(bitspersample[i + 1 :])) + t &= 
int("0b" + "1" * bps, 2) if rescale: o = ((dtype.itemsize * 8) // bps + 1) * bps if o > data.dtype.itemsize * 8: - t = t.astype('I') + t = t.astype("I") t *= (2**o - 1) // (2**bps - 1) - t //= 2**(o - (dtype.itemsize * 8)) + t //= 2 ** (o - (dtype.itemsize * 8)) result[:, i] = t return result.reshape(-1) @@ -2416,25 +2707,25 @@ def reorient(image, orientation): """ o = TIFF_ORIENTATIONS.get(orientation, orientation) - if o == 'top_left': + if o == "top_left": return image - elif o == 'top_right': + elif o == "top_right": return image[..., ::-1, :] - elif o == 'bottom_left': + elif o == "bottom_left": return image[..., ::-1, :, :] - elif o == 'bottom_right': + elif o == "bottom_right": return image[..., ::-1, ::-1, :] - elif o == 'left_top': + elif o == "left_top": return numpy.swapaxes(image, -3, -2) - elif o == 'right_top': + elif o == "right_top": return numpy.swapaxes(image, -3, -2)[..., ::-1, :] - elif o == 'left_bottom': + elif o == "left_bottom": return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] - elif o == 'right_bottom': + elif o == "right_bottom": return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] -def numpy_fromfile(arg, dtype=float, count=-1, sep=''): +def numpy_fromfile(arg, dtype=float, count=-1, sep=""): """Return array from data in binary file. 
Work around numpy issue #2230, "numpy.fromfile does not accept StringIO @@ -2454,13 +2745,13 @@ def numpy_fromfile(arg, dtype=float, count=-1, sep=''): def stripnull(string): """Return string truncated at first null character.""" - i = string.find(b'\x00') + i = string.find(b"\x00") return string if (i < 0) else string[:i] def format_size(size): """Return file size as string from byte size.""" - for unit in ('B', 'KB', 'MB', 'GB', 'TB'): + for unit in ("B", "KB", "MB", "GB", "TB"): if size < 2048: return "%.f %s" % (size, unit) size /= 1024.0 @@ -2473,9 +2764,11 @@ def natural_sorted(iterable): ['f1', 'f2', 'f10'] """ + def sortkey(x): return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] - numbers = re.compile('(\d+)') + + numbers = re.compile("(\d+)") return sorted(iterable, key=sortkey) @@ -2491,7 +2784,7 @@ def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)): return epoch + datetime.timedelta(n) -def test_tifffile(directory='testimages', verbose=True): +def test_tifffile(directory="testimages", verbose=True): """Read all images in directory. Print error message on failure. 
Examples @@ -2502,15 +2795,15 @@ def test_tifffile(directory='testimages', verbose=True): successful = 0 failed = 0 start = time.time() - for f in glob.glob(os.path.join(directory, '*.*')): + for f in glob.glob(os.path.join(directory, "*.*")): if verbose: - print("\n%s>\n" % f.lower(), end='') + print("\n%s>\n" % f.lower(), end="") t0 = time.time() try: tif = TiffFile(f, multifile=True) except Exception as e: if not verbose: - print(f, end=' ') + print(f, end=" ") print("ERROR:", e) failed += 1 continue @@ -2521,7 +2814,7 @@ def test_tifffile(directory='testimages', verbose=True): img = tif[0].asarray() except Exception as e: if not verbose: - print(f, end=' ') + print(f, end=" ") print("ERROR:", e) failed += 1 continue @@ -2529,441 +2822,450 @@ def test_tifffile(directory='testimages', verbose=True): tif.close() successful += 1 if verbose: - print("%s, %s %s, %s, %.0f ms" % ( - str(tif), str(img.shape), img.dtype, tif[0].compression, - (time.time()-t0) * 1e3)) + print( + "%s, %s %s, %s, %.0f ms" + % ( + str(tif), + str(img.shape), + img.dtype, + tif[0].compression, + (time.time() - t0) * 1e3, + ) + ) if verbose: - print("\nSuccessfully read %i of %i files in %.3f s\n" % ( - successful, successful+failed, time.time()-start)) + print( + "\nSuccessfully read %i of %i files in %.3f s\n" + % (successful, successful + failed, time.time() - start) + ) class TIFF_SUBFILE_TYPES(object): def __getitem__(self, key): result = [] if key & 1: - result.append('reduced_image') + result.append("reduced_image") if key & 2: - result.append('page') + result.append("page") if key & 4: - result.append('mask') + result.append("mask") return tuple(result) TIFF_PHOTOMETRICS = { - 0: 'miniswhite', - 1: 'minisblack', - 2: 'rgb', - 3: 'palette', - 4: 'mask', - 5: 'separated', - 6: 'cielab', - 7: 'icclab', - 8: 'itulab', - 32844: 'logl', - 32845: 'logluv', + 0: "miniswhite", + 1: "minisblack", + 2: "rgb", + 3: "palette", + 4: "mask", + 5: "separated", + 6: "cielab", + 7: "icclab", + 8: 
"itulab", + 32844: "logl", + 32845: "logluv", } TIFF_COMPESSIONS = { 1: None, - 2: 'ccittrle', - 3: 'ccittfax3', - 4: 'ccittfax4', - 5: 'lzw', - 6: 'ojpeg', - 7: 'jpeg', - 8: 'adobe_deflate', - 9: 't85', - 10: 't43', - 32766: 'next', - 32771: 'ccittrlew', - 32773: 'packbits', - 32809: 'thunderscan', - 32895: 'it8ctpad', - 32896: 'it8lw', - 32897: 'it8mp', - 32898: 'it8bl', - 32908: 'pixarfilm', - 32909: 'pixarlog', - 32946: 'deflate', - 32947: 'dcs', - 34661: 'jbig', - 34676: 'sgilog', - 34677: 'sgilog24', - 34712: 'jp2000', - 34713: 'nef', + 2: "ccittrle", + 3: "ccittfax3", + 4: "ccittfax4", + 5: "lzw", + 6: "ojpeg", + 7: "jpeg", + 8: "adobe_deflate", + 9: "t85", + 10: "t43", + 32766: "next", + 32771: "ccittrlew", + 32773: "packbits", + 32809: "thunderscan", + 32895: "it8ctpad", + 32896: "it8lw", + 32897: "it8mp", + 32898: "it8bl", + 32908: "pixarfilm", + 32909: "pixarlog", + 32946: "deflate", + 32947: "dcs", + 34661: "jbig", + 34676: "sgilog", + 34677: "sgilog24", + 34712: "jp2000", + 34713: "nef", } TIFF_DECOMPESSORS = { None: lambda x: x, - 'adobe_deflate': zlib.decompress, - 'deflate': zlib.decompress, - 'packbits': decodepackbits, - 'lzw': decodelzw, + "adobe_deflate": zlib.decompress, + "deflate": zlib.decompress, + "packbits": decodepackbits, + "lzw": decodelzw, } TIFF_DATA_TYPES = { - 1: '1B', # BYTE 8-bit unsigned integer. - 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; - # the last byte must be NULL (binary zero). - 3: '1H', # SHORT 16-bit (2-byte) unsigned integer - 4: '1I', # LONG 32-bit (4-byte) unsigned integer. - 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of - # a fraction; the second, the denominator. - 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. - 7: '1B', # UNDEFINED An 8-bit byte that may contain anything, - # depending on the definition of the field. - 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. - 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer. 
- 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator - # of a fraction, the second the denominator. - 11: '1f', # FLOAT Single precision (4-byte) IEEE format. - 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. - 13: '1I', # IFD unsigned 4 byte IFD offset. - #14: '', # UNICODE - #15: '', # COMPLEX - 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) - 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) - 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) + 1: "1B", # BYTE 8-bit unsigned integer. + 2: "1s", # ASCII 8-bit byte that contains a 7-bit ASCII code; + # the last byte must be NULL (binary zero). + 3: "1H", # SHORT 16-bit (2-byte) unsigned integer + 4: "1I", # LONG 32-bit (4-byte) unsigned integer. + 5: "2I", # RATIONAL Two LONGs: the first represents the numerator of + # a fraction; the second, the denominator. + 6: "1b", # SBYTE An 8-bit signed (twos-complement) integer. + 7: "1B", # UNDEFINED An 8-bit byte that may contain anything, + # depending on the definition of the field. + 8: "1h", # SSHORT A 16-bit (2-byte) signed (twos-complement) integer. + 9: "1i", # SLONG A 32-bit (4-byte) signed (twos-complement) integer. + 10: "2i", # SRATIONAL Two SLONGs: the first represents the numerator + # of a fraction, the second the denominator. + 11: "1f", # FLOAT Single precision (4-byte) IEEE format. + 12: "1d", # DOUBLE Double precision (8-byte) IEEE format. + 13: "1I", # IFD unsigned 4 byte IFD offset. 
+ # 14: '', # UNICODE + # 15: '', # COMPLEX + 16: "1Q", # LONG8 unsigned 8 byte integer (BigTiff) + 17: "1q", # SLONG8 signed 8 byte integer (BigTiff) + 18: "1Q", # IFD8 unsigned 8 byte IFD offset (BigTiff) } TIFF_SAMPLE_FORMATS = { - 1: 'uint', - 2: 'int', - 3: 'float', - #4: 'void', - #5: 'complex_int', - 6: 'complex', + 1: "uint", + 2: "int", + 3: "float", + # 4: 'void', + # 5: 'complex_int', + 6: "complex", } TIFF_SAMPLE_DTYPES = { - ('uint', 1): '?', # bitmap - ('uint', 2): 'B', - ('uint', 3): 'B', - ('uint', 4): 'B', - ('uint', 5): 'B', - ('uint', 6): 'B', - ('uint', 7): 'B', - ('uint', 8): 'B', - ('uint', 9): 'H', - ('uint', 10): 'H', - ('uint', 11): 'H', - ('uint', 12): 'H', - ('uint', 13): 'H', - ('uint', 14): 'H', - ('uint', 15): 'H', - ('uint', 16): 'H', - ('uint', 17): 'I', - ('uint', 18): 'I', - ('uint', 19): 'I', - ('uint', 20): 'I', - ('uint', 21): 'I', - ('uint', 22): 'I', - ('uint', 23): 'I', - ('uint', 24): 'I', - ('uint', 25): 'I', - ('uint', 26): 'I', - ('uint', 27): 'I', - ('uint', 28): 'I', - ('uint', 29): 'I', - ('uint', 30): 'I', - ('uint', 31): 'I', - ('uint', 32): 'I', - ('uint', 64): 'Q', - ('int', 8): 'b', - ('int', 16): 'h', - ('int', 32): 'i', - ('int', 64): 'q', - ('float', 16): 'e', - ('float', 32): 'f', - ('float', 64): 'd', - ('complex', 64): 'F', - ('complex', 128): 'D', - ('uint', (5, 6, 5)): 'B', + ("uint", 1): "?", # bitmap + ("uint", 2): "B", + ("uint", 3): "B", + ("uint", 4): "B", + ("uint", 5): "B", + ("uint", 6): "B", + ("uint", 7): "B", + ("uint", 8): "B", + ("uint", 9): "H", + ("uint", 10): "H", + ("uint", 11): "H", + ("uint", 12): "H", + ("uint", 13): "H", + ("uint", 14): "H", + ("uint", 15): "H", + ("uint", 16): "H", + ("uint", 17): "I", + ("uint", 18): "I", + ("uint", 19): "I", + ("uint", 20): "I", + ("uint", 21): "I", + ("uint", 22): "I", + ("uint", 23): "I", + ("uint", 24): "I", + ("uint", 25): "I", + ("uint", 26): "I", + ("uint", 27): "I", + ("uint", 28): "I", + ("uint", 29): "I", + ("uint", 30): "I", + ("uint", 
31): "I", + ("uint", 32): "I", + ("uint", 64): "Q", + ("int", 8): "b", + ("int", 16): "h", + ("int", 32): "i", + ("int", 64): "q", + ("float", 16): "e", + ("float", 32): "f", + ("float", 64): "d", + ("complex", 64): "F", + ("complex", 128): "D", + ("uint", (5, 6, 5)): "B", } TIFF_ORIENTATIONS = { - 1: 'top_left', - 2: 'top_right', - 3: 'bottom_right', - 4: 'bottom_left', - 5: 'left_top', - 6: 'right_top', - 7: 'right_bottom', - 8: 'left_bottom', + 1: "top_left", + 2: "top_right", + 3: "bottom_right", + 4: "bottom_left", + 5: "left_top", + 6: "right_top", + 7: "right_bottom", + 8: "left_bottom", } AXES_LABELS = { - 'X': 'width', - 'Y': 'height', - 'Z': 'depth', - 'S': 'sample', # rgb(a) - 'P': 'plane', # page - 'T': 'time', - 'C': 'channel', # color, emission wavelength - 'A': 'angle', - 'F': 'phase', - 'R': 'tile', # region, point - 'H': 'lifetime', # histogram - 'E': 'lambda', # excitation wavelength - 'L': 'exposure', # lux - 'V': 'event', - 'Q': 'other', + "X": "width", + "Y": "height", + "Z": "depth", + "S": "sample", # rgb(a) + "P": "plane", # page + "T": "time", + "C": "channel", # color, emission wavelength + "A": "angle", + "F": "phase", + "R": "tile", # region, point + "H": "lifetime", # histogram + "E": "lambda", # excitation wavelength + "L": "exposure", # lux + "V": "event", + "Q": "other", } AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items())) # NIH Image PicHeader v1.63 NIH_IMAGE_HEADER = [ - ('fileid', 'a8'), - ('nlines', 'i2'), - ('pixelsperline', 'i2'), - ('version', 'i2'), - ('oldlutmode', 'i2'), - ('oldncolors', 'i2'), - ('colors', 'u1', (3, 32)), - ('oldcolorstart', 'i2'), - ('colorwidth', 'i2'), - ('extracolors', 'u2', (6, 3)), - ('nextracolors', 'i2'), - ('foregroundindex', 'i2'), - ('backgroundindex', 'i2'), - ('xscale', 'f8'), - ('_x0', 'i2'), - ('_x1', 'i2'), - ('units_t', 'i2'), - ('p1', [('x', 'i2'), ('y', 'i2')]), - ('p2', [('x', 'i2'), ('y', 'i2')]), - ('curvefit_t', 'i2'), - ('ncoefficients', 'i2'), - ('coeff', 'f8', 6), - 
('_um_len', 'u1'), - ('um', 'a15'), - ('_x2', 'u1'), - ('binarypic', 'b1'), - ('slicestart', 'i2'), - ('sliceend', 'i2'), - ('scalemagnification', 'f4'), - ('nslices', 'i2'), - ('slicespacing', 'f4'), - ('currentslice', 'i2'), - ('frameinterval', 'f4'), - ('pixelaspectratio', 'f4'), - ('colorstart', 'i2'), - ('colorend', 'i2'), - ('ncolors', 'i2'), - ('fill1', '3u2'), - ('fill2', '3u2'), - ('colortable_t', 'u1'), - ('lutmode_t', 'u1'), - ('invertedtable', 'b1'), - ('zeroclip', 'b1'), - ('_xunit_len', 'u1'), - ('xunit', 'a11'), - ('stacktype_t', 'i2'), + ("fileid", "a8"), + ("nlines", "i2"), + ("pixelsperline", "i2"), + ("version", "i2"), + ("oldlutmode", "i2"), + ("oldncolors", "i2"), + ("colors", "u1", (3, 32)), + ("oldcolorstart", "i2"), + ("colorwidth", "i2"), + ("extracolors", "u2", (6, 3)), + ("nextracolors", "i2"), + ("foregroundindex", "i2"), + ("backgroundindex", "i2"), + ("xscale", "f8"), + ("_x0", "i2"), + ("_x1", "i2"), + ("units_t", "i2"), + ("p1", [("x", "i2"), ("y", "i2")]), + ("p2", [("x", "i2"), ("y", "i2")]), + ("curvefit_t", "i2"), + ("ncoefficients", "i2"), + ("coeff", "f8", 6), + ("_um_len", "u1"), + ("um", "a15"), + ("_x2", "u1"), + ("binarypic", "b1"), + ("slicestart", "i2"), + ("sliceend", "i2"), + ("scalemagnification", "f4"), + ("nslices", "i2"), + ("slicespacing", "f4"), + ("currentslice", "i2"), + ("frameinterval", "f4"), + ("pixelaspectratio", "f4"), + ("colorstart", "i2"), + ("colorend", "i2"), + ("ncolors", "i2"), + ("fill1", "3u2"), + ("fill2", "3u2"), + ("colortable_t", "u1"), + ("lutmode_t", "u1"), + ("invertedtable", "b1"), + ("zeroclip", "b1"), + ("_xunit_len", "u1"), + ("xunit", "a11"), + ("stacktype_t", "i2"), ] -#NIH_COLORTABLE_TYPE = ( +# NIH_COLORTABLE_TYPE = ( # 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow', # 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum') -#NIH_LUTMODE_TYPE = ( +# NIH_LUTMODE_TYPE = ( # 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale', # 'ColorLut', 'CustomGrayscale') 
-#NIH_CURVEFIT_TYPE = ( +# NIH_CURVEFIT_TYPE = ( # 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit', # 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated', # 'UncalibratedOD') -#NIH_UNITS_TYPE = ( +# NIH_UNITS_TYPE = ( # 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters', # 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits') -#NIH_STACKTYPE_TYPE = ( +# NIH_STACKTYPE_TYPE = ( # 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack') # MetaMorph STK tags MM_TAG_IDS = { - 0: 'auto_scale', - 1: 'min_scale', - 2: 'max_scale', - 3: 'spatial_calibration', - #4: 'x_calibration', - #5: 'y_calibration', - #6: 'calibration_units', - #7: 'name', - 8: 'thresh_state', - 9: 'thresh_state_red', - 11: 'thresh_state_green', - 12: 'thresh_state_blue', - 13: 'thresh_state_lo', - 14: 'thresh_state_hi', - 15: 'zoom', - #16: 'create_time', - #17: 'last_saved_time', - 18: 'current_buffer', - 19: 'gray_fit', - 20: 'gray_point_count', - #21: 'gray_x', - #22: 'gray_y', - #23: 'gray_min', - #24: 'gray_max', - #25: 'gray_unit_name', - 26: 'standard_lut', - 27: 'wavelength', - #28: 'stage_position', - #29: 'camera_chip_offset', - #30: 'overlay_mask', - #31: 'overlay_compress', - #32: 'overlay', - #33: 'special_overlay_mask', - #34: 'special_overlay_compress', - #35: 'special_overlay', - 36: 'image_property', - #37: 'stage_label', - #38: 'autoscale_lo_info', - #39: 'autoscale_hi_info', - #40: 'absolute_z', - #41: 'absolute_z_valid', - #42: 'gamma', - #43: 'gamma_red', - #44: 'gamma_green', - #45: 'gamma_blue', - #46: 'camera_bin', - 47: 'new_lut', - #48: 'image_property_ex', - 49: 'plane_property', - #50: 'user_lut_table', - 51: 'red_autoscale_info', - #52: 'red_autoscale_lo_info', - #53: 'red_autoscale_hi_info', - 54: 'red_minscale_info', - 55: 'red_maxscale_info', - 56: 'green_autoscale_info', - #57: 'green_autoscale_lo_info', - #58: 'green_autoscale_hi_info', - 59: 'green_minscale_info', - 60: 'green_maxscale_info', - 61: 
'blue_autoscale_info', - #62: 'blue_autoscale_lo_info', - #63: 'blue_autoscale_hi_info', - 64: 'blue_min_scale_info', - 65: 'blue_max_scale_info', - #66: 'overlay_plane_color' + 0: "auto_scale", + 1: "min_scale", + 2: "max_scale", + 3: "spatial_calibration", + # 4: 'x_calibration', + # 5: 'y_calibration', + # 6: 'calibration_units', + # 7: 'name', + 8: "thresh_state", + 9: "thresh_state_red", + 11: "thresh_state_green", + 12: "thresh_state_blue", + 13: "thresh_state_lo", + 14: "thresh_state_hi", + 15: "zoom", + # 16: 'create_time', + # 17: 'last_saved_time', + 18: "current_buffer", + 19: "gray_fit", + 20: "gray_point_count", + # 21: 'gray_x', + # 22: 'gray_y', + # 23: 'gray_min', + # 24: 'gray_max', + # 25: 'gray_unit_name', + 26: "standard_lut", + 27: "wavelength", + # 28: 'stage_position', + # 29: 'camera_chip_offset', + # 30: 'overlay_mask', + # 31: 'overlay_compress', + # 32: 'overlay', + # 33: 'special_overlay_mask', + # 34: 'special_overlay_compress', + # 35: 'special_overlay', + 36: "image_property", + # 37: 'stage_label', + # 38: 'autoscale_lo_info', + # 39: 'autoscale_hi_info', + # 40: 'absolute_z', + # 41: 'absolute_z_valid', + # 42: 'gamma', + # 43: 'gamma_red', + # 44: 'gamma_green', + # 45: 'gamma_blue', + # 46: 'camera_bin', + 47: "new_lut", + # 48: 'image_property_ex', + 49: "plane_property", + # 50: 'user_lut_table', + 51: "red_autoscale_info", + # 52: 'red_autoscale_lo_info', + # 53: 'red_autoscale_hi_info', + 54: "red_minscale_info", + 55: "red_maxscale_info", + 56: "green_autoscale_info", + # 57: 'green_autoscale_lo_info', + # 58: 'green_autoscale_hi_info', + 59: "green_minscale_info", + 60: "green_maxscale_info", + 61: "blue_autoscale_info", + # 62: 'blue_autoscale_lo_info', + # 63: 'blue_autoscale_hi_info', + 64: "blue_min_scale_info", + 65: "blue_max_scale_info", + # 66: 'overlay_plane_color' } # Olympus FluoView MM_DIMENSION = [ - ('name', 'a16'), - ('size', 'i4'), - ('origin', 'f8'), - ('resolution', 'f8'), - ('unit', 'a64'), + ("name", 
"a16"), + ("size", "i4"), + ("origin", "f8"), + ("resolution", "f8"), + ("unit", "a64"), ] MM_HEADER = [ - ('header_flag', 'i2'), - ('image_type', 'u1'), - ('image_name', 'a257'), - ('offset_data', 'u4'), - ('palette_size', 'i4'), - ('offset_palette0', 'u4'), - ('offset_palette1', 'u4'), - ('comment_size', 'i4'), - ('offset_comment', 'u4'), - ('dimensions', MM_DIMENSION, 10), - ('offset_position', 'u4'), - ('map_type', 'i2'), - ('map_min', 'f8'), - ('map_max', 'f8'), - ('min_value', 'f8'), - ('max_value', 'f8'), - ('offset_map', 'u4'), - ('gamma', 'f8'), - ('offset', 'f8'), - ('gray_channel', MM_DIMENSION), - ('offset_thumbnail', 'u4'), - ('voice_field', 'i4'), - ('offset_voice_field', 'u4'), + ("header_flag", "i2"), + ("image_type", "u1"), + ("image_name", "a257"), + ("offset_data", "u4"), + ("palette_size", "i4"), + ("offset_palette0", "u4"), + ("offset_palette1", "u4"), + ("comment_size", "i4"), + ("offset_comment", "u4"), + ("dimensions", MM_DIMENSION, 10), + ("offset_position", "u4"), + ("map_type", "i2"), + ("map_min", "f8"), + ("map_max", "f8"), + ("min_value", "f8"), + ("max_value", "f8"), + ("offset_map", "u4"), + ("gamma", "f8"), + ("offset", "f8"), + ("gray_channel", MM_DIMENSION), + ("offset_thumbnail", "u4"), + ("voice_field", "i4"), + ("offset_voice_field", "u4"), ] # Carl Zeiss LSM CZ_LSM_INFO = [ - ('magic_number', 'i4'), - ('structure_size', 'i4'), - ('dimension_x', 'i4'), - ('dimension_y', 'i4'), - ('dimension_z', 'i4'), - ('dimension_channels', 'i4'), - ('dimension_time', 'i4'), - ('dimension_data_type', 'i4'), - ('thumbnail_x', 'i4'), - ('thumbnail_y', 'i4'), - ('voxel_size_x', 'f8'), - ('voxel_size_y', 'f8'), - ('voxel_size_z', 'f8'), - ('origin_x', 'f8'), - ('origin_y', 'f8'), - ('origin_z', 'f8'), - ('scan_type', 'u2'), - ('spectral_scan', 'u2'), - ('data_type', 'u4'), - ('offset_vector_overlay', 'u4'), - ('offset_input_lut', 'u4'), - ('offset_output_lut', 'u4'), - ('offset_channel_colors', 'u4'), - ('time_interval', 'f8'), - 
('offset_channel_data_types', 'u4'), - ('offset_scan_information', 'u4'), - ('offset_ks_data', 'u4'), - ('offset_time_stamps', 'u4'), - ('offset_event_list', 'u4'), - ('offset_roi', 'u4'), - ('offset_bleach_roi', 'u4'), - ('offset_next_recording', 'u4'), - ('display_aspect_x', 'f8'), - ('display_aspect_y', 'f8'), - ('display_aspect_z', 'f8'), - ('display_aspect_time', 'f8'), - ('offset_mean_of_roi_overlay', 'u4'), - ('offset_topo_isoline_overlay', 'u4'), - ('offset_topo_profile_overlay', 'u4'), - ('offset_linescan_overlay', 'u4'), - ('offset_toolbar_flags', 'u4'), + ("magic_number", "i4"), + ("structure_size", "i4"), + ("dimension_x", "i4"), + ("dimension_y", "i4"), + ("dimension_z", "i4"), + ("dimension_channels", "i4"), + ("dimension_time", "i4"), + ("dimension_data_type", "i4"), + ("thumbnail_x", "i4"), + ("thumbnail_y", "i4"), + ("voxel_size_x", "f8"), + ("voxel_size_y", "f8"), + ("voxel_size_z", "f8"), + ("origin_x", "f8"), + ("origin_y", "f8"), + ("origin_z", "f8"), + ("scan_type", "u2"), + ("spectral_scan", "u2"), + ("data_type", "u4"), + ("offset_vector_overlay", "u4"), + ("offset_input_lut", "u4"), + ("offset_output_lut", "u4"), + ("offset_channel_colors", "u4"), + ("time_interval", "f8"), + ("offset_channel_data_types", "u4"), + ("offset_scan_information", "u4"), + ("offset_ks_data", "u4"), + ("offset_time_stamps", "u4"), + ("offset_event_list", "u4"), + ("offset_roi", "u4"), + ("offset_bleach_roi", "u4"), + ("offset_next_recording", "u4"), + ("display_aspect_x", "f8"), + ("display_aspect_y", "f8"), + ("display_aspect_z", "f8"), + ("display_aspect_time", "f8"), + ("offset_mean_of_roi_overlay", "u4"), + ("offset_topo_isoline_overlay", "u4"), + ("offset_topo_profile_overlay", "u4"), + ("offset_linescan_overlay", "u4"), + ("offset_toolbar_flags", "u4"), ] # Import functions for LSM_INFO sub-records CZ_LSM_INFO_READERS = { - 'scan_information': read_cz_lsm_scan_info, - 'time_stamps': read_cz_lsm_time_stamps, - 'event_list': read_cz_lsm_event_list, + 
"scan_information": read_cz_lsm_scan_info, + "time_stamps": read_cz_lsm_time_stamps, + "event_list": read_cz_lsm_event_list, } # Map cz_lsm_info.scan_type to dimension order CZ_SCAN_TYPES = { - 0: 'XYZCT', # x-y-z scan - 1: 'XYZCT', # z scan (x-z plane) - 2: 'XYZCT', # line scan - 3: 'XYTCZ', # time series x-y - 4: 'XYZTC', # time series x-z - 5: 'XYTCZ', # time series 'Mean of ROIs' - 6: 'XYZTC', # time series x-y-z - 7: 'XYCTZ', # spline scan - 8: 'XYCZT', # spline scan x-z - 9: 'XYTCZ', # time series spline plane x-z - 10: 'XYZCT', # point mode + 0: "XYZCT", # x-y-z scan + 1: "XYZCT", # z scan (x-z plane) + 2: "XYZCT", # line scan + 3: "XYTCZ", # time series x-y + 4: "XYZTC", # time series x-z + 5: "XYTCZ", # time series 'Mean of ROIs' + 6: "XYZTC", # time series x-y-z + 7: "XYCTZ", # spline scan + 8: "XYCZT", # spline scan x-z + 9: "XYTCZ", # time series spline plane x-z + 10: "XYZCT", # point mode } # Map dimension codes to cz_lsm_info attribute CZ_DIMENSIONS = { - 'X': 'dimension_x', - 'Y': 'dimension_y', - 'Z': 'dimension_z', - 'C': 'dimension_channels', - 'T': 'dimension_time', + "X": "dimension_x", + "Y": "dimension_y", + "Z": "dimension_z", + "C": "dimension_channels", + "T": "dimension_time", } # Descriptions of cz_lsm_info.data_type CZ_DATA_TYPES = { - 0: 'varying data types', - 2: '12 bit unsigned integer', - 5: '32 bit float', + 0: "varying data types", + 2: "12 bit unsigned integer", + 5: "32 bit float", } CZ_LSM_SCAN_INFO_ARRAYS = { @@ -2971,8 +3273,8 @@ def __getitem__(self, key): 0x30000000: "lasers", 0x60000000: "detectionchannels", 0x80000000: "illuminationchannels", - 0xa0000000: "beamsplitters", - 0xc0000000: "datachannels", + 0xA0000000: "beamsplitters", + 0xC0000000: "datachannels", 0x13000000: "markers", 0x11000000: "timers", } @@ -2982,8 +3284,8 @@ def __getitem__(self, key): 0x50000000: "lasers", 0x70000000: "detectionchannels", 0x90000000: "illuminationchannels", - 0xb0000000: "beamsplitters", - 0xd0000000: "datachannels", + 0xB0000000: 
"beamsplitters", + 0xD0000000: "datachannels", 0x14000000: "markers", 0x12000000: "timers", } @@ -2998,12 +3300,12 @@ def __getitem__(self, key): 0x10000007: "oledb_recording_scan_type", 0x10000008: "oledb_recording_scan_mode", 0x10000009: "number_of_stacks", - 0x1000000a: "lines_per_plane", - 0x1000000b: "samples_per_line", - 0x1000000c: "planes_per_volume", - 0x1000000d: "images_width", - 0x1000000e: "images_height", - 0x1000000f: "images_number_planes", + 0x1000000A: "lines_per_plane", + 0x1000000B: "samples_per_line", + 0x1000000C: "planes_per_volume", + 0x1000000D: "images_width", + 0x1000000E: "images_height", + 0x1000000F: "images_number_planes", 0x10000010: "images_number_stacks", 0x10000011: "images_number_channels", 0x10000012: "linscan_xy_size", @@ -3014,12 +3316,12 @@ def __getitem__(self, key): 0x10000017: "zoom_y", 0x10000018: "zoom_z", 0x10000019: "sample_0x", - 0x1000001a: "sample_0y", - 0x1000001b: "sample_0z", - 0x1000001c: "sample_spacing", - 0x1000001d: "line_spacing", - 0x1000001e: "plane_spacing", - 0x1000001f: "plane_width", + 0x1000001A: "sample_0y", + 0x1000001B: "sample_0z", + 0x1000001C: "sample_spacing", + 0x1000001D: "line_spacing", + 0x1000001E: "plane_spacing", + 0x1000001F: "plane_width", 0x10000020: "plane_height", 0x10000021: "volume_depth", 0x10000023: "nutation", @@ -3059,11 +3361,11 @@ def __getitem__(self, key): 0x40000005: "sampling_number", 0x40000006: "acquire", 0x40000007: "sample_observation_time", - 0x4000000b: "time_between_stacks", - 0x4000000c: "name", - 0x4000000d: "collimator1_name", - 0x4000000e: "collimator1_position", - 0x4000000f: "collimator2_name", + 0x4000000B: "time_between_stacks", + 0x4000000C: "name", + 0x4000000D: "collimator1_name", + 0x4000000E: "collimator1_position", + 0x4000000F: "collimator2_name", 0x40000010: "collimator2_position", 0x40000011: "is_bleach_track", 0x40000012: "is_bleach_after_scan_number", @@ -3092,9 +3394,9 @@ def __getitem__(self, key): 0x40000037: "id_tubelens", 0x40000038: 
"id_tubelens_position", 0x40000039: "transmitted_light", - 0x4000003a: "reflected_light", - 0x4000003b: "simultan_grab_and_bleach", - 0x4000003c: "bleach_pixel_time", + 0x4000003A: "reflected_light", + 0x4000003B: "simultan_grab_and_bleach", + 0x4000003C: "bleach_pixel_time", # detection_channels 0x70000001: "integration_mode", 0x70000002: "special_mode", @@ -3105,12 +3407,12 @@ def __getitem__(self, key): 0x70000007: "amplifier_offs_first", 0x70000008: "amplifier_offs_last", 0x70000009: "pinhole_diameter", - 0x7000000a: "counting_trigger", - 0x7000000b: "acquire", - 0x7000000c: "point_detector_name", - 0x7000000d: "amplifier_name", - 0x7000000e: "pinhole_name", - 0x7000000f: "filter_set_name", + 0x7000000A: "counting_trigger", + 0x7000000B: "acquire", + 0x7000000C: "point_detector_name", + 0x7000000D: "amplifier_name", + 0x7000000E: "pinhole_name", + 0x7000000F: "filter_set_name", 0x70000010: "filter_name", 0x70000013: "integrator_name", 0x70000014: "detection_channel_name", @@ -3134,32 +3436,32 @@ def __getitem__(self, key): 0x90000006: "power_bc1", 0x90000007: "power_bc2", # beam_splitters - 0xb0000001: "filter_set", - 0xb0000002: "filter", - 0xb0000003: "name", + 0xB0000001: "filter_set", + 0xB0000002: "filter", + 0xB0000003: "name", # data_channels - 0xd0000001: "name", - 0xd0000003: "acquire", - 0xd0000004: "color", - 0xd0000005: "sample_type", - 0xd0000006: "bits_per_sample", - 0xd0000007: "ratio_type", - 0xd0000008: "ratio_track1", - 0xd0000009: "ratio_track2", - 0xd000000a: "ratio_channel1", - 0xd000000b: "ratio_channel2", - 0xd000000c: "ratio_const1", - 0xd000000d: "ratio_const2", - 0xd000000e: "ratio_const3", - 0xd000000f: "ratio_const4", - 0xd0000010: "ratio_const5", - 0xd0000011: "ratio_const6", - 0xd0000012: "ratio_first_images1", - 0xd0000013: "ratio_first_images2", - 0xd0000014: "dye_name", - 0xd0000015: "dye_folder", - 0xd0000016: "spectrum", - 0xd0000017: "acquire", + 0xD0000001: "name", + 0xD0000003: "acquire", + 0xD0000004: "color", + 
0xD0000005: "sample_type", + 0xD0000006: "bits_per_sample", + 0xD0000007: "ratio_type", + 0xD0000008: "ratio_track1", + 0xD0000009: "ratio_track2", + 0xD000000A: "ratio_channel1", + 0xD000000B: "ratio_channel2", + 0xD000000C: "ratio_const1", + 0xD000000D: "ratio_const2", + 0xD000000E: "ratio_const3", + 0xD000000F: "ratio_const4", + 0xD0000010: "ratio_const5", + 0xD0000011: "ratio_const6", + 0xD0000012: "ratio_first_images1", + 0xD0000013: "ratio_first_images2", + 0xD0000014: "dye_name", + 0xD0000015: "dye_folder", + 0xD0000016: "spectrum", + 0xD0000017: "acquire", # markers 0x14000001: "name", 0x14000002: "description", @@ -3177,113 +3479,135 @@ def __getitem__(self, key): # Map TIFF tag code to attribute name, default value, type, count, validator TIFF_TAGS = { - 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()), - 255: ('subfile_type', None, 3, 1, - {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}), - 256: ('image_width', None, 4, 1, None), - 257: ('image_length', None, 4, 1, None), - 258: ('bits_per_sample', 1, 3, 1, None), - 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS), - 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS), - 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}), - 269: ('document_name', None, 2, None, None), - 270: ('image_description', None, 2, None, None), - 271: ('make', None, 2, None, None), - 272: ('model', None, 2, None, None), - 273: ('strip_offsets', None, 4, None, None), - 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS), - 277: ('samples_per_pixel', 1, 3, 1, None), - 278: ('rows_per_strip', 2**32-1, 4, 1, None), - 279: ('strip_byte_counts', None, 4, None, None), - 280: ('min_sample_value', None, 3, None, None), - 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample - 282: ('x_resolution', None, 5, 1, None), - 283: ('y_resolution', None, 5, 1, None), - 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}), - 285: ('page_name', None, 2, None, None), - 286: ('x_position', None, 5, 
1, None), - 287: ('y_position', None, 5, 1, None), - 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}), - 297: ('page_number', None, 3, 2, None), - 305: ('software', None, 2, None, None), - 306: ('datetime', None, 2, None, None), - 315: ('artist', None, 2, None, None), - 316: ('host_computer', None, 2, None, None), - 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}), - 320: ('color_map', None, 3, None, None), - 322: ('tile_width', None, 4, 1, None), - 323: ('tile_length', None, 4, 1, None), - 324: ('tile_offsets', None, 4, None, None), - 325: ('tile_byte_counts', None, 4, None, None), - 338: ('extra_samples', None, 3, None, - {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}), - 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS), - 347: ('jpeg_tables', None, None, None, None), - 530: ('ycbcr_subsampling', 1, 3, 2, None), - 531: ('ycbcr_positioning', 1, 3, 1, None), - 32997: ('image_depth', None, 4, 1, None), - 32998: ('tile_depth', None, 4, 1, None), - 33432: ('copyright', None, 1, None, None), - 33445: ('md_file_tag', None, 4, 1, None), - 33446: ('md_scale_pixel', None, 5, 1, None), - 33447: ('md_color_table', None, 3, None, None), - 33448: ('md_lab_name', None, 2, None, None), - 33449: ('md_sample_info', None, 2, None, None), - 33450: ('md_prep_date', None, 2, None, None), - 33451: ('md_prep_time', None, 2, None, None), - 33452: ('md_file_units', None, 2, None, None), - 33550: ('model_pixel_scale', None, 12, 3, None), - 33922: ('model_tie_point', None, 12, None, None), - 37510: ('user_comment', None, None, None, None), - 34665: ('exif_ifd', None, None, 1, None), - 34735: ('geo_key_directory', None, 3, None, None), - 34736: ('geo_double_params', None, 12, None, None), - 34737: ('geo_ascii_params', None, 2, None, None), - 34853: ('gps_ifd', None, None, 1, None), - 42112: ('gdal_metadata', None, 2, None, None), - 42113: ('gdal_nodata', None, 2, None, None), - 50838: ('imagej_byte_counts', None, None, None, None), - 50289: 
('mc_xy_position', None, 12, 2, None), - 50290: ('mc_z_position', None, 12, 1, None), - 50291: ('mc_xy_calibration', None, 12, 3, None), - 50292: ('mc_lens_lem_na_n', None, 12, 3, None), - 50293: ('mc_channel_name', None, 1, None, None), - 50294: ('mc_ex_wavelength', None, 12, 1, None), - 50295: ('mc_time_stamp', None, 12, 1, None), - 65200: ('flex_xml', None, 2, None, None), + 254: ("new_subfile_type", 0, 4, 1, TIFF_SUBFILE_TYPES()), + 255: ( + "subfile_type", + None, + 3, + 1, + {0: "undefined", 1: "image", 2: "reduced_image", 3: "page"}, + ), + 256: ("image_width", None, 4, 1, None), + 257: ("image_length", None, 4, 1, None), + 258: ("bits_per_sample", 1, 3, 1, None), + 259: ("compression", 1, 3, 1, TIFF_COMPESSIONS), + 262: ("photometric", None, 3, 1, TIFF_PHOTOMETRICS), + 266: ("fill_order", 1, 3, 1, {1: "msb2lsb", 2: "lsb2msb"}), + 269: ("document_name", None, 2, None, None), + 270: ("image_description", None, 2, None, None), + 271: ("make", None, 2, None, None), + 272: ("model", None, 2, None, None), + 273: ("strip_offsets", None, 4, None, None), + 274: ("orientation", 1, 3, 1, TIFF_ORIENTATIONS), + 277: ("samples_per_pixel", 1, 3, 1, None), + 278: ("rows_per_strip", 2**32 - 1, 4, 1, None), + 279: ("strip_byte_counts", None, 4, None, None), + 280: ("min_sample_value", None, 3, None, None), + 281: ("max_sample_value", None, 3, None, None), # 2**bits_per_sample + 282: ("x_resolution", None, 5, 1, None), + 283: ("y_resolution", None, 5, 1, None), + 284: ("planar_configuration", 1, 3, 1, {1: "contig", 2: "separate"}), + 285: ("page_name", None, 2, None, None), + 286: ("x_position", None, 5, 1, None), + 287: ("y_position", None, 5, 1, None), + 296: ("resolution_unit", 2, 4, 1, {1: "none", 2: "inch", 3: "centimeter"}), + 297: ("page_number", None, 3, 2, None), + 305: ("software", None, 2, None, None), + 306: ("datetime", None, 2, None, None), + 315: ("artist", None, 2, None, None), + 316: ("host_computer", None, 2, None, None), + 317: ("predictor", 1, 3, 1, {1: 
None, 2: "horizontal"}), + 320: ("color_map", None, 3, None, None), + 322: ("tile_width", None, 4, 1, None), + 323: ("tile_length", None, 4, 1, None), + 324: ("tile_offsets", None, 4, None, None), + 325: ("tile_byte_counts", None, 4, None, None), + 338: ( + "extra_samples", + None, + 3, + None, + {0: "unspecified", 1: "assocalpha", 2: "unassalpha"}, + ), + 339: ("sample_format", 1, 3, 1, TIFF_SAMPLE_FORMATS), + 347: ("jpeg_tables", None, None, None, None), + 530: ("ycbcr_subsampling", 1, 3, 2, None), + 531: ("ycbcr_positioning", 1, 3, 1, None), + 32997: ("image_depth", None, 4, 1, None), + 32998: ("tile_depth", None, 4, 1, None), + 33432: ("copyright", None, 1, None, None), + 33445: ("md_file_tag", None, 4, 1, None), + 33446: ("md_scale_pixel", None, 5, 1, None), + 33447: ("md_color_table", None, 3, None, None), + 33448: ("md_lab_name", None, 2, None, None), + 33449: ("md_sample_info", None, 2, None, None), + 33450: ("md_prep_date", None, 2, None, None), + 33451: ("md_prep_time", None, 2, None, None), + 33452: ("md_file_units", None, 2, None, None), + 33550: ("model_pixel_scale", None, 12, 3, None), + 33922: ("model_tie_point", None, 12, None, None), + 37510: ("user_comment", None, None, None, None), + 34665: ("exif_ifd", None, None, 1, None), + 34735: ("geo_key_directory", None, 3, None, None), + 34736: ("geo_double_params", None, 12, None, None), + 34737: ("geo_ascii_params", None, 2, None, None), + 34853: ("gps_ifd", None, None, 1, None), + 42112: ("gdal_metadata", None, 2, None, None), + 42113: ("gdal_nodata", None, 2, None, None), + 50838: ("imagej_byte_counts", None, None, None, None), + 50289: ("mc_xy_position", None, 12, 2, None), + 50290: ("mc_z_position", None, 12, 1, None), + 50291: ("mc_xy_calibration", None, 12, 3, None), + 50292: ("mc_lens_lem_na_n", None, 12, 3, None), + 50293: ("mc_channel_name", None, 1, None, None), + 50294: ("mc_ex_wavelength", None, 12, 1, None), + 50295: ("mc_time_stamp", None, 12, 1, None), + 65200: ("flex_xml", None, 2, None, 
None), # code: (attribute name, default value, type, count, validator) } # Map custom TIFF tag codes to attribute names and import functions CUSTOM_TAGS = { - 700: ('xmp', read_bytes), - 34377: ('photoshop', read_numpy), - 33723: ('iptc', read_bytes), - 34675: ('icc_profile', read_numpy), - 33628: ('mm_uic1', read_mm_uic1), - 33629: ('mm_uic2', read_mm_uic2), - 33630: ('mm_uic3', read_mm_uic3), - 33631: ('mm_uic4', read_mm_uic4), - 34361: ('mm_header', read_mm_header), - 34362: ('mm_stamp', read_mm_stamp), - 34386: ('mm_user_block', read_bytes), - 34412: ('cz_lsm_info', read_cz_lsm_info), - 43314: ('nih_image_header', read_nih_image_header), + 700: ("xmp", read_bytes), + 34377: ("photoshop", read_numpy), + 33723: ("iptc", read_bytes), + 34675: ("icc_profile", read_numpy), + 33628: ("mm_uic1", read_mm_uic1), + 33629: ("mm_uic2", read_mm_uic2), + 33630: ("mm_uic3", read_mm_uic3), + 33631: ("mm_uic4", read_mm_uic4), + 34361: ("mm_header", read_mm_header), + 34362: ("mm_stamp", read_mm_stamp), + 34386: ("mm_user_block", read_bytes), + 34412: ("cz_lsm_info", read_cz_lsm_info), + 43314: ("nih_image_header", read_nih_image_header), # 40001: ('mc_ipwinscal', read_bytes), - 40100: ('mc_id_old', read_bytes), - 50288: ('mc_id', read_bytes), - 50296: ('mc_frame_properties', read_bytes), - 50839: ('imagej_metadata', read_bytes), - 51123: ('micromanager_metadata', read_json), + 40100: ("mc_id_old", read_bytes), + 50288: ("mc_id", read_bytes), + 50296: ("mc_frame_properties", read_bytes), + 50839: ("imagej_metadata", read_bytes), + 51123: ("micromanager_metadata", read_json), } # Max line length of printed output PRINT_LINE_LEN = 79 -def imshow(data, title=None, vmin=0, vmax=None, cmap=None, - bitspersample=None, photometric='rgb', interpolation='nearest', - dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs): +def imshow( + data, + title=None, + vmin=0, + vmax=None, + cmap=None, + bitspersample=None, + photometric="rgb", + interpolation="nearest", + dpi=96, + figure=None, + 
subplot=111, + maxdim=8192, + **kwargs +): """Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. @@ -3307,12 +3631,12 @@ def imshow(data, title=None, vmin=0, vmax=None, cmap=None, Arguments for matplotlib.pyplot.imshow. """ - #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): + # if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'): # raise ValueError("Can't handle %s photometrics" % photometric) # TODO: handle photometric == 'separated' (CMYK) - isrgb = photometric in ('rgb', 'palette') + isrgb = photometric in ("rgb", "palette") data = numpy.atleast_2d(data.squeeze()) - data = data[(slice(0, maxdim), ) * len(data.shape)] + data = data[(slice(0, maxdim),) * len(data.shape)] dims = data.ndim if dims < 2: @@ -3330,12 +3654,12 @@ def imshow(data, title=None, vmin=0, vmax=None, cmap=None, isrgb = isrgb and data.shape[-1] in (3, 4) dims -= 3 if isrgb else 2 - if photometric == 'palette' and isrgb: + if photometric == "palette" and isrgb: datamax = data.max() if datamax > 255: data >>= 8 # possible precision loss - data = data.astype('B') - elif data.dtype.kind in 'ui': + data = data.astype("B") + elif data.dtype.kind in "ui": if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: try: bitspersample = int(math.ceil(math.log(data.max(), 2))) @@ -3350,28 +3674,28 @@ def imshow(data, title=None, vmin=0, vmax=None, cmap=None, data <<= 8 - bitspersample elif bitspersample > 8: data >>= bitspersample - 8 # precision loss - data = data.astype('B') - elif data.dtype.kind == 'f': + data = data.astype("B") + elif data.dtype.kind == "f": datamax = data.max() if isrgb and datamax > 1.0: - if data.dtype.char == 'd': - data = data.astype('f') + if data.dtype.char == "d": + data = data.astype("f") data /= datamax - elif data.dtype.kind == 'b': + elif data.dtype.kind == "b": datamax = 1 - elif data.dtype.kind == 'c': + elif data.dtype.kind == "c": raise NotImplementedError("complex type") # TODO: 
handle complex types if not isrgb: if vmax is None: vmax = datamax if vmin is None: - if data.dtype.kind == 'i': + if data.dtype.kind == "i": dtmin = numpy.iinfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: vmin = numpy.min(data > dtmin) - if data.dtype.kind == 'f': + if data.dtype.kind == "f": dtmin = numpy.finfo(data.dtype).min vmin = numpy.min(data) if vmin == dtmin: @@ -3379,37 +3703,54 @@ def imshow(data, title=None, vmin=0, vmax=None, cmap=None, else: vmin = 0 - pyplot = sys.modules['matplotlib.pyplot'] + pyplot = sys.modules["matplotlib.pyplot"] if figure is None: - pyplot.rc('font', family='sans-serif', weight='normal', size=8) - figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, - facecolor='1.0', edgecolor='w') + pyplot.rc("font", family="sans-serif", weight="normal", size=8) + figure = pyplot.figure( + dpi=dpi, + figsize=(10.3, 6.3), + frameon=True, + facecolor="1.0", + edgecolor="w", + ) try: figure.canvas.manager.window.title(title) except Exception: pass - pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9, - left=0.1, right=0.95, hspace=0.05, wspace=0.0) + pyplot.subplots_adjust( + bottom=0.03 * (dims + 2), + top=0.9, + left=0.1, + right=0.95, + hspace=0.05, + wspace=0.0, + ) subplot = pyplot.subplot(subplot) if title: try: - title = unicode(title, 'Windows-1252') + title = unicode(title, "Windows-1252") except TypeError: pass pyplot.title(title, size=11) if cmap is None: - if data.dtype.kind in 'ub' and vmin == 0: - cmap = 'gray' + if data.dtype.kind in "ub" and vmin == 0: + cmap = "gray" else: - cmap = 'coolwarm' - if photometric == 'miniswhite': - cmap += '_r' - - image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax, - cmap=cmap, interpolation=interpolation, **kwargs) + cmap = "coolwarm" + if photometric == "miniswhite": + cmap += "_r" + + image = pyplot.imshow( + data[(0,) * dims].squeeze(), + vmin=vmin, + vmax=vmax, + cmap=cmap, + interpolation=interpolation, + **kwargs + ) if not isrgb: 
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 @@ -3420,8 +3761,12 @@ def format_coord(x, y): y = int(y + 0.5) try: if dims: - return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x], - current, x, y) + return "%s @ %s [%4i, %4i]" % ( + cur_ax_dat[1][y, x], + current, + x, + y, + ) else: return "%s @ [%4i, %4i]" % (data[y, x], x, y) except IndexError: @@ -3430,12 +3775,20 @@ def format_coord(x, y): pyplot.gca().format_coord = format_coord if dims: - current = list((0, ) * dims) + current = list((0,) * dims) cur_ax_dat = [0, data[tuple(current)].squeeze()] - sliders = [pyplot.Slider( - pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), - 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', - valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] + sliders = [ + pyplot.Slider( + pyplot.axes([0.125, 0.03 * (axis + 1), 0.725, 0.025]), + "Dimension %i" % axis, + 0, + data.shape[axis] - 1, + 0, + facecolor="0.5", + valfmt="%%.0f [%i]" % data.shape[axis], + ) + for axis in range(dims) + ] for slider in sliders: slider.drawon = False @@ -3466,22 +3819,22 @@ def on_keypressed(event, data=data, current=current): # callback function for key press event key = event.key axis = cur_ax_dat[0] - if str(key) in '0123456789': + if str(key) in "0123456789": on_changed(key, axis) - elif key == 'right': + elif key == "right": on_changed(current[axis] + 1, axis) - elif key == 'left': + elif key == "left": on_changed(current[axis] - 1, axis) - elif key == 'up': - cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1 - elif key == 'down': - cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1 - elif key == 'end': + elif key == "up": + cur_ax_dat[0] = 0 if axis == len(data.shape) - 1 else axis + 1 + elif key == "down": + cur_ax_dat[0] = len(data.shape) - 1 if axis == 0 else axis - 1 + elif key == "end": on_changed(data.shape[axis] - 1, axis) - elif key == 'home': + elif key == "home": on_changed(0, axis) - 
figure.canvas.mpl_connect('key_press_event', on_keypressed) + figure.canvas.mpl_connect("key_press_event", on_keypressed) for axis, ctrl in enumerate(sliders): ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) @@ -3490,7 +3843,7 @@ def on_keypressed(event, data=data, current=current): def _app_show(): """Block the GUI. For use as skimage plugin.""" - pyplot = sys.modules['matplotlib.pyplot'] + pyplot = sys.modules["matplotlib.pyplot"] pyplot.show() @@ -3508,35 +3861,80 @@ def main(argv=None): search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d parser = optparse.OptionParser( usage="usage: %prog [options] path", - description=search_doc("\n\n([^|]*?)\n\n", ''), - version="%%prog %s" % search_doc(":Version: (.*)", "Unknown")) + description=search_doc("\n\n([^|]*?)\n\n", ""), + version="%%prog %s" % search_doc(":Version: (.*)", "Unknown"), + ) opt = parser.add_option - opt('-p', '--page', dest='page', type='int', default=-1, - help="display single page") - opt('-s', '--series', dest='series', type='int', default=-1, - help="display series of pages of same shape") - opt('--nomultifile', dest='nomultifile', action='store_true', - default=False, help="don't read OME series from multiple files") - opt('--noplot', dest='noplot', action='store_true', default=False, - help="don't display images") - opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear', - help="image interpolation method") - opt('--dpi', dest='dpi', type='int', default=96, - help="set plot resolution") - opt('--debug', dest='debug', action='store_true', default=False, - help="raise exception on failures") - opt('--test', dest='test', action='store_true', default=False, - help="try read all images in path") - opt('--doctest', dest='doctest', action='store_true', default=False, - help="runs the internal tests") - opt('-v', '--verbose', dest='verbose', action='store_true', default=True) - opt('-q', '--quiet', dest='verbose', action='store_false') + opt( + "-p", + 
"--page", + dest="page", + type="int", + default=-1, + help="display single page", + ) + opt( + "-s", + "--series", + dest="series", + type="int", + default=-1, + help="display series of pages of same shape", + ) + opt( + "--nomultifile", + dest="nomultifile", + action="store_true", + default=False, + help="don't read OME series from multiple files", + ) + opt( + "--noplot", + dest="noplot", + action="store_true", + default=False, + help="don't display images", + ) + opt( + "--interpol", + dest="interpol", + metavar="INTERPOL", + default="bilinear", + help="image interpolation method", + ) + opt( + "--dpi", dest="dpi", type="int", default=96, help="set plot resolution" + ) + opt( + "--debug", + dest="debug", + action="store_true", + default=False, + help="raise exception on failures", + ) + opt( + "--test", + dest="test", + action="store_true", + default=False, + help="try read all images in path", + ) + opt( + "--doctest", + dest="doctest", + action="store_true", + default=False, + help="runs the internal tests", + ) + opt("-v", "--verbose", dest="verbose", action="store_true", default=True) + opt("-q", "--quiet", dest="verbose", action="store_false") settings, path = parser.parse_args() - path = ' '.join(path) + path = " ".join(path) if settings.doctest: import doctest + doctest.testmod() return 0 if not path: @@ -3545,16 +3943,16 @@ def main(argv=None): test_tifffile(path, settings.verbose) return 0 - if any(i in path for i in '?*'): + if any(i in path for i in "?*"): path = glob.glob(path) if not path: - print('no files match the pattern') + print("no files match the pattern") return 0 # TODO: handle image sequences - #if len(path) == 1: + # if len(path) == 1: path = path[0] - print("Reading file structure...", end=' ') + print("Reading file structure...", end=" ") start = time.time() try: tif = TiffFile(path, multifile=not settings.nomultifile) @@ -3564,39 +3962,45 @@ def main(argv=None): else: print("\n", e) sys.exit(0) - print("%.3f ms" % 
((time.time()-start) * 1e3)) + print("%.3f ms" % ((time.time() - start) * 1e3)) if tif.is_ome: settings.norgb = True images = [(None, tif[0 if settings.page < 0 else settings.page])] if not settings.noplot: - print("Reading image data... ", end=' ') + print("Reading image data... ", end=" ") def notnone(x): return next(i for i in x if i is not None) + start = time.time() try: if settings.page >= 0: - images = [(tif.asarray(key=settings.page), - tif[settings.page])] + images = [(tif.asarray(key=settings.page), tif[settings.page])] elif settings.series >= 0: - images = [(tif.asarray(series=settings.series), - notnone(tif.series[settings.series].pages))] + images = [ + ( + tif.asarray(series=settings.series), + notnone(tif.series[settings.series].pages), + ) + ] else: images = [] for i, s in enumerate(tif.series): try: images.append( - (tif.asarray(series=i), notnone(s.pages))) + (tif.asarray(series=i), notnone(s.pages)) + ) except ValueError as e: images.append((None, notnone(s.pages))) if settings.debug: raise else: - print("\n* series %i failed: %s... " % (i, e), - end='') - print("%.3f ms" % ((time.time()-start) * 1e3)) + print( + "\n* series %i failed: %s... 
" % (i, e), end="" + ) + print("%.3f ms" % ((time.time() - start) * 1e3)) except Exception as e: if settings.debug: raise @@ -3608,7 +4012,7 @@ def notnone(x): print("\nTIFF file:", tif) print() for i, s in enumerate(tif.series): - print ("Series %i" % i) + print("Series %i" % i) print(s) print() for i, page in images: @@ -3616,20 +4020,27 @@ def notnone(x): print(page.tags) if page.is_palette: print("\nColor Map:", page.color_map.shape, page.color_map.dtype) - for attr in ('cz_lsm_info', 'cz_lsm_scan_information', 'mm_uic_tags', - 'mm_header', 'imagej_tags', 'micromanager_metadata', - 'nih_image_header'): + for attr in ( + "cz_lsm_info", + "cz_lsm_scan_information", + "mm_uic_tags", + "mm_header", + "imagej_tags", + "micromanager_metadata", + "nih_image_header", + ): if hasattr(page, attr): print("", attr.upper(), Record(getattr(page, attr)), sep="\n") print() if page.is_micromanager: - print('MICROMANAGER_FILE_METADATA') + print("MICROMANAGER_FILE_METADATA") print(Record(tif.micromanager_metadata)) if images and not settings.noplot: try: import matplotlib - matplotlib.use('TkAgg') + + matplotlib.use("TkAgg") from matplotlib import pyplot except ImportError as e: warnings.warn("failed to import matplotlib.\n%s" % e) @@ -3638,23 +4049,28 @@ def notnone(x): if img is None: continue vmin, vmax = None, None - if 'gdal_nodata' in page.tags: + if "gdal_nodata" in page.tags: vmin = numpy.min(img[img > float(page.gdal_nodata)]) if page.is_stk: try: - vmin = page.mm_uic_tags['min_scale'] - vmax = page.mm_uic_tags['max_scale'] + vmin = page.mm_uic_tags["min_scale"] + vmax = page.mm_uic_tags["max_scale"] except KeyError: pass else: if vmax <= vmin: vmin, vmax = None, None title = "%s\n %s" % (str(tif), str(page)) - imshow(img, title=title, vmin=vmin, vmax=vmax, - bitspersample=page.bits_per_sample, - photometric=page.photometric, - interpolation=settings.interpol, - dpi=settings.dpi) + imshow( + img, + title=title, + vmin=vmin, + vmax=vmax, + 
bitspersample=page.bits_per_sample, + photometric=page.photometric, + interpolation=settings.interpol, + dpi=settings.dpi, + ) pyplot.show() @@ -3665,4 +4081,4 @@ def notnone(x): unicode = str if __name__ == "__main__": - sys.exit(main()) \ No newline at end of file + sys.exit(main()) diff --git a/diffpy/srxplanar/version.py b/diffpy/srxplanar/version.py index 08b9544..4a9e1ae 100644 --- a/diffpy/srxplanar/version.py +++ b/diffpy/srxplanar/version.py @@ -13,19 +13,18 @@ # ############################################################################## -"""Definition of __version__, __date__, __gitsha__. -""" +"""Definition of __version__, __date__, __gitsha__.""" from pkg_resources import resource_stream from ConfigParser import SafeConfigParser # obtain version information from the version.cfg file cp = SafeConfigParser() -cp.readfp(resource_stream(__name__, 'version.cfg')) +cp.readfp(resource_stream(__name__, "version.cfg")) -__version__ = cp.get('DEFAULT', 'version') -__date__ = cp.get('DEFAULT', 'date') -__gitsha__ = cp.get('DEFAULT', 'commit') +__version__ = cp.get("DEFAULT", "version") +__date__ = cp.get("DEFAULT", "date") +__gitsha__ = cp.get("DEFAULT", "commit") del cp diff --git a/doc/manual/source/conf.py b/doc/manual/source/conf.py index dbd1ae7..c19073f 100644 --- a/doc/manual/source/conf.py +++ b/doc/manual/source/conf.py @@ -18,85 +18,91 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../../..')) +# sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath("../../..")) # abbreviations -ab_authors = u'Xiaohao Yang, Simon J.L. Billinge group' +ab_authors = "Xiaohao Yang, Simon J.L. 
Billinge group" # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.pngmath'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx.ext.pngmath", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'diffpy.srxplanar' -copyright = u'%Y, Columbia University' +project = "diffpy.srxplanar" +copyright = "%Y, Columbia University" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. from setup import versiondata -fullversion = versiondata.get('DEFAULT', 'version') + +fullversion = versiondata.get("DEFAULT", "version") # The short X.Y version. -version = '.'.join(fullversion.split('.')[:2]) +version = ".".join(fullversion.split(".")[:2]) # The full version, including alpha/beta/rc tags. release = fullversion # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
-#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' -today_seconds = versiondata.getint('DEFAULT', 'timestamp') -today = time.strftime('%B %d, %Y', time.localtime(today_seconds)) +# today = '' +today_seconds = versiondata.getint("DEFAULT", "timestamp") +today = time.strftime("%B %d, %Y", time.localtime(today_seconds)) year = today.split()[-1] # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # substitute YEAR in the copyright string -copyright = copyright.replace('%Y', year) +copyright = copyright.replace("%Y", year) # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['diffpy.srxplanar'] +modindex_common_prefix = ["diffpy.srxplanar"] # Display all warnings for missing links. nitpicky = True @@ -105,135 +111,135 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
-html_theme = 'default' +html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} -html_theme_options = {'collapsiblesidebar' : 'true'} +# html_theme_options = {} +html_theme_options = {"collapsiblesidebar": "true"} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. 
-#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'Structuredoc' +htmlhelp_basename = "Structuredoc" # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ - ('index', 'srxplanar_manual.tex', u'srxplanar Documentation', - ab_authors, 'manual'), + ( + "index", + "srxplanar_manual.tex", + "srxplanar Documentation", + ab_authors, + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'srxplanar', u'srxplanar Documentation', - ab_authors, 1) -] +man_pages = [("index", "srxplanar", "srxplanar Documentation", ab_authors, 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ @@ -242,19 +248,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'srxplanar', u'srxplanar Documentation', - ab_authors, 'srxplanar', 'One line description of project.', - 'Miscellaneous'), + ( + "index", + "srxplanar", + "srxplanar Documentation", + ab_authors, + "srxplanar", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. 
-#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..a24a49e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,22 @@ +[tool.black] +line-length = 79 +include = '\.pyi?$' +exclude = ''' +/( +\.git +| \.hg +| \.mypy_cache +| \.tox +| \.venv +| \.rst +| \.txt +| _build +| buck-out +| build +| dist +| blib2to3 +| tests/data +)/ +''' \ No newline at end of file diff --git a/setup.py b/setup.py index 1053272..dedc939 100755 --- a/setup.py +++ b/setup.py @@ -14,82 +14,86 @@ # versioncfgfile holds version data for git commit hash and date. # It must reside in the same directory as version.py.
MYDIR = os.path.dirname(os.path.abspath(__file__)) -versioncfgfile = os.path.join(MYDIR, 'diffpy/srxplanar/version.cfg') +versioncfgfile = os.path.join(MYDIR, "diffpy/srxplanar/version.cfg") + def gitinfo(): from subprocess import Popen, PIPE + kw = dict(stdout=PIPE, cwd=MYDIR) - proc = Popen(['git', 'describe', '--match=v[[:digit:]]*'], **kw) + proc = Popen(["git", "describe", "--match=v[[:digit:]]*"], **kw) desc = proc.stdout.read() - proc = Popen(['git', 'log', '-1', '--format=%H %at %ai'], **kw) + proc = Popen(["git", "log", "-1", "--format=%H %at %ai"], **kw) glog = proc.stdout.read() rv = {} - rv['version'] = '-'.join(desc.strip().split('-')[:-1]).lstrip('v') - rv['commit'], rv['timestamp'], rv['date'] = glog.strip().split(None, 2) + rv["version"] = "-".join(desc.strip().split("-")[:-1]).lstrip("v") + rv["commit"], rv["timestamp"], rv["date"] = glog.strip().split(None, 2) return rv + def getversioncfg(config=versioncfgfile): from ConfigParser import SafeConfigParser + cp = SafeConfigParser() cp.read(config) - gitdir = os.path.join(MYDIR, '.git') - if not os.path.exists(gitdir): return cp + gitdir = os.path.join(MYDIR, ".git") + if not os.path.exists(gitdir): + return cp d = cp.defaults() g = gitinfo() - if g['version'] != d.get('version') or g['commit'] != d.get('commit'): - cp.set('DEFAULT', 'version', g['version']) - cp.set('DEFAULT', 'commit', g['commit']) - cp.set('DEFAULT', 'date', g['date']) - cp.set('DEFAULT', 'timestamp', g['timestamp']) - cp.write(open(config, 'w')) + if g["version"] != d.get("version") or g["commit"] != d.get("commit"): + cp.set("DEFAULT", "version", g["version"]) + cp.set("DEFAULT", "commit", g["commit"]) + cp.set("DEFAULT", "date", g["date"]) + cp.set("DEFAULT", "timestamp", g["timestamp"]) + cp.write(open(config, "w")) return cp + # generate version.cfg for diffpy.confutils -versioncfgfile1 = os.path.join(MYDIR, 'diffpy/confutils/version.cfg') +versioncfgfile1 = os.path.join(MYDIR, "diffpy/confutils/version.cfg") 
getversioncfg(versioncfgfile1) versiondata = getversioncfg(versioncfgfile) # define distribution setup_args = dict( - name="diffpy.srxplanar", - version=versiondata.get('DEFAULT', 'version'), - namespace_packages=['diffpy'], - packages=find_packages(), - include_package_data=True, - zip_safe=False, - entry_points={ - # define console_scripts here, see setuptools docs for details. - 'console_scripts' : ['srxplanar = diffpy.srxplanar.srxplanar:main' - ], - }, - - author='Simon J.L. Billinge', - author_email='sb2896@columbia.edu', - maintainer='Xiaohao Yang', - maintainer_email='sodestiny1@gmail.com', - url='https://github.com/diffpy/diffpy.srxplanar', - description="2D diffraction image integration and uncertainty propagation", - license='BSD-style license', - keywords="diffpy planar integration non-splitting uncertainty", - classifiers=[ - # List of possible values at - # http://pypi.python.org/pypi?:action=list_classifiers - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: BSD License', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Operating System :: Unix', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Topic :: Scientific/Engineering :: Chemistry', - 'Topic :: Scientific/Engineering :: Physics', - ], + name="diffpy.srxplanar", + version=versiondata.get("DEFAULT", "version"), + namespace_packages=["diffpy"], + packages=find_packages(), + include_package_data=True, + zip_safe=False, + entry_points={ + # define console_scripts here, see setuptools docs for details. + "console_scripts": ["srxplanar = diffpy.srxplanar.srxplanar:main"], + }, + author="Simon J.L. 
Billinge", + author_email="sb2896@columbia.edu", + maintainer="Xiaohao Yang", + maintainer_email="sodestiny1@gmail.com", + url="https://github.com/diffpy/diffpy.srxplanar", + description="2D diffraction image integration and uncertainty propagation", + license="BSD-style license", + keywords="diffpy planar integration non-splitting uncertainty", + classifiers=[ + # List of possible values at + # http://pypi.python.org/pypi?:action=list_classifiers + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: BSD License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Programming Language :: Python :: 2.6", + "Programming Language :: Python :: 2.7", + "Topic :: Scientific/Engineering :: Chemistry", + "Topic :: Scientific/Engineering :: Physics", + ], ) -if __name__ == '__main__': +if __name__ == "__main__": setup(**setup_args) # End of file