diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 1168bd9..38c230c 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -28,6 +28,7 @@ jobs:
         python -m pip install --upgrade pip
         pip install flake8 pytest
         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+        pip install .
     - name: Lint with flake8
       run: |
         # stop the build if there are Python syntax errors or undefined names
diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
deleted file mode 100644
index f358604..0000000
--- a/.github/workflows/python-package-conda.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: Python Package using Conda
-
-on: [push]
-
-jobs:
-  build-linux:
-    runs-on: ubuntu-latest
-    strategy:
-      max-parallel: 5
-
-    steps:
-    - uses: actions/checkout@v4
-    - name: Set up Python 3.10
-      uses: actions/setup-python@v3
-      with:
-        python-version: '3.10'
-    - name: Add conda to system path
-      run: |
-        # $CONDA is an environment variable pointing to the root of the miniconda directory
-        echo $CONDA/bin >> $GITHUB_PATH
-    - name: Install dependencies
-      run: |
-        conda env update --file environment.yml --name base
-    - name: Lint with flake8
-      run: |
-        conda install flake8
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Test with pytest
-      run: |
-        conda install pytest
-        pytest
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..486e1a8
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+repos:
+  - repo: https://github.com/psf/black
+    rev: 24.10.0  # Replace by any tag/version: https://github.com/psf/black/tags
+    hooks:
+      - id: black
+        language_version: python3  # Should be a command that runs python3.6+
+        args: ["--line-length", "99"]
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: trailing-whitespace
+      - id: no-commit-to-branch
+        args: [--branch, master]
+
+default_language_version:
+  python: python3
diff --git a/README.md b/README.md
index 4791eb5..538cad7 100644
--- a/README.md
+++ b/README.md
@@ -2,32 +2,32 @@
 Jin Group Tools for handling a variety of common data. Builds upon the WrightTools Data object.
 
-
-## Installation
-
-Basic:
-
-`pip install git+https://github.com/wright-group/makeitwright.git`
-
-iontof support is considered optional; if you need to use iontof data, use
-
-`pip install git+https://github.com/wright-group/makeitwright.git[iontof]`
-
-
 ## Features
 - a module for each instrument featured
-  - AFM
+  - AFM (Gwyddion)
   - Andor Neo Camera (Solis)
   - Becker and Hickl SPCM
   - Horiba LabRAM
   - Generic Images
   - ion TOF
   - XRD (Bruker)
 - various data importers to create the WrightTools Data objects.
 - preset styles and routines for making quick figures
 
+## Installation
+
+### Basic
+
+`pip install git+https://github.com/wright-group/makeitwright.git`
+
+### IonTOF
+
+Support for IonTOF data is optional; if you need to read IonTOF data, install the optional extra:
+
+`pip install "makeitwright[iontof] @ git+https://github.com/wright-group/makeitwright.git"`
+
+
 ## Examples
 
 TODO
diff --git a/scripts/AbsProcessing.py b/examples/AbsProcessing.py
similarity index 93%
rename from scripts/AbsProcessing.py
rename to examples/AbsProcessing.py
index 102a95f..50bde2f 100644
--- a/scripts/AbsProcessing.py
+++ b/examples/AbsProcessing.py
@@ -1,16 +1,19 @@
-# Process Reflectance/Transmittance/Absorbance Data from Wright group
+# Process Reflectance/Transmittance/Absorbance Data from the Wright group microscope
 import pathlib
 
-import makeitwright.process.andor as andor
-from makeitwright.process.helpers import roi
-from makeitwright.parsers import parse
-from makeitwright.artists import setparams, setdpi
-from makeitwright.spectra import plot_spectra as plot
+import makeitwright as mw
+import matplotlib as mpl
 
-setparams()
-setdpi(150)
+andor = mw.andor
+roi = mw.helpers.roi
+parse = mw.parsers.parse
+plot = mw.spectra.plot_spectra
+
+
+mpl.rc('figure', dpi=150)
+
 
 filepath = pathlib.Path().expanduser().resolve() / "Desktop/Research Data/Wright Table/Original/test"
 
 filename_R = "PEAPbBr4 R"
diff --git a/scripts/PLProcessing.py b/examples/PLProcessing.py
similarity index 85%
rename from scripts/PLProcessing.py
rename to examples/PLProcessing.py
index 559fc0d..bb061bb 100644
--- a/scripts/PLProcessing.py
+++ b/examples/PLProcessing.py
@@ -2,15 +2,14 @@
 # Process PL Data from Wright group
 import pathlib
 
-import makeitwright.process.andor as andor
-from makeitwright.process.helpers import roi
-from makeitwright.parsers import parse
-from makeitwright.artists import setparams, setdpi
-from makeitwright.spectra import plot_spectra as plot
+import makeitwright as mw
 
-setparams()
-setdpi(150)
+andor = mw.andor
+roi = mw.helpers.roi
+parse = mw.parsers.parse
+plot = mw.spectra.plot_spectra
+
 
 filepath = pathlib.Path().expanduser() / "Desktop" / "Research Data" / "Wright Table" / "Original" / "test"
 filename = "PEAPbI on FPEASnI PL 77K 4 2 hour wait for cool"
diff --git a/scripts/main.py b/examples/main.py
similarity index 92%
rename from scripts/main.py
rename to examples/main.py
index 1210156..8d21758 100644
--- a/scripts/main.py
+++ b/examples/main.py
@@ -1,11 +1,12 @@
 import matplotlib as mpl
 from pathlib import Path
 
-from makeitwright.process import andor
+import makeitwright as mw
 
-from makeitwright.process.helpers import show, roi, set_label
-from makeitwright.parsers import parse
-from makeitwright.artists import plot
+andor = mw.andor
+roi = mw.helpers.roi
+parse = mw.parsers.parse
+plot = mw.artists.plot
 
 user_path = Path().expanduser().resolve()
diff --git a/scripts/main2.py b/examples/pl_T_R_A.py
similarity index 57%
rename from scripts/main2.py
rename to examples/pl_T_R_A.py
index 5ba3adc..305c64d 100644
--- a/scripts/main2.py
+++ b/examples/pl_T_R_A.py
@@ -1,58 +1,48 @@
 import pathlib
-import numpy as np
-import matplotlib.cm as cms
-import cmocean.cm as cmo
-from scipy.signal import savgol_filter as savgol
-from scipy.signal import medfilt2d
-from scipy.optimize import curve_fit
-from scipy.stats import pearsonr, spearmanr, ttest_ind
-import WrightTools as wt
+import matplotlib as mpl
+import makeitwright as mw
 
-import makeitwright.process.andor as andor
-import makeitwright.process.beckerhickl as becker
-import makeitwright.process.spectralprofile
-from makeitwright.process.helpers import show, roi, norm, set_label
-from makeitwright.parsers import parse
-from makeitwright.artists import setparams, setdpi
-from makeitwright.spectra import plot_spectra as plot
+roi = mw.helpers.roi
+parse = mw.parsers.parse
+andor = mw.andor
+becker = mw.beckerhickl
+plot = mw.spectra.plot_spectra
 
-setparams()
-setdpi(150)
+fp = pathlib.Path().expanduser().resolve() / r"Desktop/Research Data/Wright Table/Original"
 
-fp = pathlib.Path().expanduser().resolve() / r"Desktop/Research Data/Wright Table/Original" # filepath name to folder
+# set plotting parameters
+mpl.rcParams['font.sans-serif'] = "Arial"
+mpl.rcParams['font.family'] = "sans-serif"
+mpl.rcParams['font.size'] = 14
+mpl.rcParams['lines.linewidth'] = 4
+mpl.rcParams['pcolor.shading'] = 'auto'
+mpl.rcParams['figure.dpi'] = 150
 
-
-# if True:
 # Plot PL
 data = parse(fp, keywords='4 hr.asc')
-    #andor.correct_PL_background(data, ybkg=[0, 20])
-    #y_profile = roi(data, {'wl': ([400, 800], 'sum')}) # If need to check object area
-    #plot(y_profile)
 PL_ROI = roi(data, {'y': ([1021, 1047], 'average')})
 plot(PL_ROI, channel=0, xrange=[500, 850])
-PL_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/4hr.txt', 'w')
+PL_output = open(fp / '4hr.txt', 'w')
 PL_dataTrace = zip(PL_ROI.axes[0], PL_ROI.channels[0])
 for x in PL_dataTrace:
     PL_output.write(str(x[0])+'\t')
     PL_output.write(str(x[1])+'\n')
 PL_output.close()
 
-# if True:
 # Plot T/R/A
-data = parse('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1', keywords='Object 3')
+data = parse(fp / 'For Chris/23_11_21/4ClPEASnI n1', keywords='Object 3')
 R = data[2]
 R_back = data[1]
 T = data[4]
 T_back = data[3]
-
 andor.compute_reflectance(R, R_back, dark_wavelength_range=None)
 y_profile = roi(R, {'wl': ([580, 750], 'sum')}) # If need to check object area
 plot(y_profile)
 plot(R, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Currently at 10 x mag
 R_ROI = roi(R, {'y': ([1020, 1070], 'average')})
-R_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 R processed.txt', 'w')
+R_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 R processed.txt', 'w')
 R_dataTrace = zip(R_ROI.axes[0], R_ROI.channels[1])
 for x in R_dataTrace:
     R_output.write(str(x[0])+'\t')
@@ -64,7 +54,7 @@
 # plot(y_profile)
 plot(T, channel=1, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) # Current 10x mag, 100x mag 54-70
 T_ROI = roi(T, {'y': ([1020, 1070], 'average')})
-T_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 T Processed.txt', 'w')
+T_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 T Processed.txt', 'w')
 T_dataTrace = zip(T_ROI.axes[0], T_ROI.channels[1])
 for x in T_dataTrace:
     T_output.write(str(x[0])+'\t')
@@ -72,7 +62,7 @@
 T_output.close()
 
 # andor.compute_absorbance(R, T)
-A_output = open('C:/Users/kmfor/Desktop/Research Data/Wright Table/Original/For Chris/23_11_21/4ClPEASnI n1/Object 3 A processed.txt', 'w')
+A_output = open(fp / 'For Chris/23_11_21/4ClPEASnI n1/Object 3 A processed.txt', 'w')
 A_ROI = roi(T, {'y': ([1020, 1070], 'average')}) # A is channel 2 in both R and T data objects
 plot(R, channel=2, ROI={'y': ([1020, 1070], 'average')}, xrange=[580, 750]) #Current 10x mag. can add vrange
 A_dataTrace = zip(A_ROI.axes[0], A_ROI.channels[2])
diff --git a/scripts/workup.py b/examples/workup.py
similarity index 98%
rename from scripts/workup.py
rename to examples/workup.py
index 07229de..57569c4 100644
--- a/scripts/workup.py
+++ b/examples/workup.py
@@ -8,6 +8,15 @@
 from scipy.signal import savgol_filter
 from matplotlib import pyplot as plt
 
+import makeitwright as mw
+
+
+parse = mw.parsers.parse
+__at = mw.helpers.find_nearest
+roi = mw.helpers.roi
+set_label = mw.helpers.set_label
+norm = mw.helpers.norm
+
 
 def lorentz_fit_2(data, channel='darksub', xrange='all', bounds=None, plot=False):
@@ -136,6 +145,7 @@ def test2(x, a1, u1, s1, a2, u2, s2):
 def residual(a, fit):
     return (a-fit)/a*100
 
+
 base = pathlib.Path().expanduser().resolve() / r'OneDrive/Documents/UW/research/data local/WG-microscope/biexciton-fluence-dependent-PL_20220909'
 fn1 = base / "n1BA"
 fn2 = base / 'n2BAMA_CRRsample'
@@ -201,7 +211,7 @@ def residual(a, fit):
         wlbkgsub = (d['sig'][:]-wlbkg)/exposure
         d.create_channel('constantbkgsub',values=constantbkgsub)
         d.create_channel('wlbkgsub',values=wlbkgsub)
-        set_label(d, 'wlbkgsub', "PL intensity (cps)")
+        # set_label(d, 'wlbkgsub', "PL intensity (cps)")
 
 bn1 = roi(n1raw,{'y':'sum'})
 bn2 = roi(n2raw,{'y':'sum'})
diff --git a/makeitwright/VERSION b/makeitwright/VERSION
new file mode 100644
index 0000000..8a9ecc2
--- /dev/null
+++ b/makeitwright/VERSION
@@ -0,0 +1 @@
+0.0.1
\ No newline at end of file
diff --git a/makeitwright/__init__.py b/makeitwright/__init__.py
index e69de29..0db4ef6 100644
--- a/makeitwright/__init__.py
+++ b/makeitwright/__init__.py
@@ -0,0 +1,8 @@
+from .__version__ import *
+from .core import *
+
+from . import andor
+from . import beckerhickl
+from . import horiba
+from . import iontof
+from . import xrd
diff --git a/makeitwright/__version__.py b/makeitwright/__version__.py
new file mode 100644
index 0000000..3b03f91
--- /dev/null
+++ b/makeitwright/__version__.py
@@ -0,0 +1,20 @@
+import pathlib
+
+
+here = pathlib.Path(__file__).resolve().parent
+
+
+__all__ = ["__version__", "__branch__"]
+__version__ = pathlib.Path(here / "VERSION").read_text().strip()
+
+p = here.parent / ".git"
+if p.is_file():
+    with open(str(p)) as f:
+        p = p.parent / f.readline()[8:].strip()  # Strip "gitdir: "
+p = p / "HEAD"
+if p.exists():
+    with open(str(p)) as f:
+        __branch__ = f.readline().rstrip().split(r"/")[-1]
+    __version__ += "+" + __branch__
+else:
+    __branch__ = None
diff --git a/makeitwright/process/andor.py b/makeitwright/andor.py
similarity index 78%
rename from makeitwright/process/andor.py
rename to makeitwright/andor.py
index 01d6ce3..77d6c56 100644
--- a/makeitwright/process/andor.py
+++ b/makeitwright/andor.py
@@ -1,13 +1,9 @@
-import pathlib
-import os
 import warnings
 import numpy as np
-import WrightTools as wt
 
-from . import image
-from . import spectralprofile
-from .helpers import roi, set_label, get_channels
-import makeitwright.styles as styles
+from .core import image, spectralprofile, styles
+from .core.helpers import roi, set_label, get_channels
+
 
 APD_PIXEL = (1325, 1080)
 SLIT_PIXEL_COLUMN = 1325
@@ -52,184 +48,6 @@ def plot_decomposition(data, channel=0, mode='R', **kwargs):
     spectralprofile.plot_decomposition(data, 'wl', 'y', channel, **params)
 
 
-def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False):
-    """Create a data object from Andor Solis software (ascii exports).
-
-    Parameters
-    ----------
-    fpath : path-like
-        Path to file (should be .asc format).
-        Can be either a local or remote file (http/ftp).
-        Can be compressed with gz/bz2, decompression based on file name.
-    name : string (optional)
-        Name to give to the created data object. If None, filename is used.
-        Default is None.
-
-    Returns
-    -------
-    data
-        New data object.
-    """
-
-    objective_lenses = {
-        '5x-Jin' : 0.893,
-        '20x-Jin' : 3.52,
-        '100x-Wright' : 18.2,
-        '5' : 0.893,
-        '20' : 3.52,
-        '100' : 18.2,
-        5 : 0.893,
-        20 : 3.52,
-        100 : 18.2
-    }
-
-    # parse filepath
-    filestr = os.fspath(fpath)
-    filepath = pathlib.Path(fpath)
-
-    if not ".asc" in filepath.suffixes:
-        wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
-    # parse name
-    if name is None:
-        name = filepath.name.split("/")[-1]
-
-    if objective_lens=='prompt':
-        objective_lens = input(f'enter magnification for data at {name}: ')
-        if not objective_lens:
-            objective_lens = 0
-
-    # create data
-    ds = np.DataSource(None)
-    f = ds.open(filestr, "rt")
-    axis0 = []
-    arr = []
-    attrs = {}
-
-    line0 = f.readline().strip()[:-1]
-    line0 = [float(x) for x in line0.split(",")]  # TODO: robust to space, tab, comma
-    axis0.append(line0.pop(0))
-    arr.append(line0)
-
-    def get_frames(f, arr, axis0):
-        axis0_written = False
-        while True:
-            line = f.readline().strip()[:-1]
-            if len(line) == 0:
-                break
-            else:
-                line = [float(x) for x in line.split(",")]
-                # signature of new frames is restart of axis0
-                if not axis0_written and (line[0] == axis0[0]):
-                    axis0_written = True
-                if axis0_written:
-                    line.pop(0)
-                else:
-                    axis0.append(line.pop(0))
-                arr.append(line)
-        return arr, axis0
-
-    arr, axis0 = get_frames(f, arr, axis0)
-    nframes = len(arr) // len(axis0)
-
-    i = 0
-    while i < 3:
-        line = f.readline().strip()
-        if len(line) == 0:
-            i += 1
-        else:
-            try:
-                key, val = line.split(":", 1)
-            except ValueError:
-                pass
-            else:
-                attrs[key.strip()] = val.strip()
-
-    f.close()
-
-    #create data object
-    arr = np.array(arr)
-    axis0 = np.array(axis0)
-    data = wt.Data(name=name)
-    if float(attrs["Grating Groove Density (l/mm)"]) == 0:
-        xname = 'x'
-        dtype = 'image'
-        try:
-            axis0 = axis0/objective_lenses[objective_lens]
-            xunits = 'µm'
-        except KeyError:
-            xunits = 'px'
-    else:
-        xname = 'wl'
-        xunits = 'nm'
-        dtype = 'spectralprofile'
-
-    axis1 = np.arange(arr.shape[-1])
-    yname='y'
-    try:
-        axis1 = axis1/objective_lenses[objective_lens]
-        yunits = 'µm'
-    except KeyError:
-        yunits = 'px'
-
-    axes = [xname, yname]
-
-    if nframes == 1:
-        arr = np.array(arr)
-        data.create_variable(name=xname, values=axis0[:, None], units=xunits)
-        data.create_variable(name=yname, values=axis1[None, :], units=yunits)
-    else:
-        frames = np.arange(nframes)
-        try:
-            ct = float(attrs["Kinetic Cycle Time (secs)"])
-            frames = frames*ct
-            tname = 't'
-            tunits = 's'
-        except KeyError:
-            tname = 'frame'
-            tunits = None
-        arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))
-        data.create_variable(name=tname, values=frames[:, None, None], units=tunits)
-        data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)
-        data.create_variable(name=yname, values=axis1[None, None, :], units=yunits)
-        axes = [tname] + axes
-
-    if xname=='wl':
-        if xunits=='nm':
-            data[xname].attrs['label'] = "wavelength (nm)"
-        if xunits=='wn':
-            data[xname].attrs['label'] = "wavenumber (cm-1)"
-    if xname=='x':
-        data[xname].attrs['label'] = "x (µm)"
-    if yname=='y':
-        data[yname].attrs['label'] = "y (µm)"
-
-    data.transform(*axes)
-    if cps:
-        try:
-            arr = arr/float(attrs["Exposure Time (secs)"])
-        except KeyError:
-            pass
-        try:
-            arr = arr/int(attrs["Number of Accumulations"])
-        except KeyError:
-            pass
-
-    data.create_channel(name='sig', values=arr, signed=False)
-    if cps:
-        data['sig'].attrs['label'] = "intensity (cps)"
-    else:
-        data['sig'].attrs['label'] = "counts"
-
-    for key, val in attrs.items():
-        data.attrs[key] = val
-
-    # finish
-    print("data created at {0}".format(data.fullpath))
-    print("  axes: {0}".format(data.axis_names))
-    print("  shape: {0}".format(data.shape))
-    data.attrs['dtype']=dtype
-
-    return data
 
 
 def _get_reference_material_array(data, channel, material):
     """
diff --git a/makeitwright/process/beckerhickl.py b/makeitwright/beckerhickl.py
similarity index 79%
rename from makeitwright/process/beckerhickl.py
rename to makeitwright/beckerhickl.py
index 231e2e4..7e909ff 100644
--- a/makeitwright/process/beckerhickl.py
+++ b/makeitwright/beckerhickl.py
@@ -1,51 +1,13 @@
-__name__ = "beckerhickl"
-__author__ = "Chris Roy, Song Jin Research Group, Dept. of Chemistry, University of Wisconsin - Madison"
-
 import numpy as np
 import matplotlib.pyplot as plt
 import WrightTools as wt
 from scipy.optimize import curve_fit
 from scipy.stats import pearsonr
-from . import helpers
-import makeitwright.spectra as spectra
-import makeitwright.styles as styles
-
-
-get_axes = helpers.get_axes
-get_channels = helpers.get_channels
-set_label = helpers.set_label
-roi = helpers.roi
-
+from .core.helpers import get_axes, get_channels, set_label, roi
+from .core import spectra
+from .core import styles
 
-def fromSP130(fpath, name=None):
-    if fpath.split('.')[-1] != 'asc':
-        print(f"filetype .{fpath.split('.')[-1]} not supported")
-    else:
-        with open(fpath) as f:
-            txt = f.readlines()
-        header_size = 0
-        for i, line in enumerate(txt):
-            if 'Title' in line.split() and name is None:
-                name = line.split()[-1]
-            if '*BLOCK' in line:
-                header_size = i+1
-
-        arr = np.genfromtxt(fpath, delimiter=',', skip_header=header_size, skip_footer=1)
-        t = arr[:,0]
-        sig = arr[:,1]
-        t = t-t[np.argmax(sig)]
-
-        out = wt.Data(name=name)
-        out.create_variable('t', values=t, units='ns')
-        out['t'].attrs['label'] = "time (ns)"
-        out.create_channel('sig', values=sig)
-        out['sig'].attrs['label'] = "PL counts"
-        out.transform('t')
-        out.create_channel('norm', values=helpers.norm(out['sig'][:], 0.01, 1))
-        out['norm'].attrs['label'] = "norm. PL counts"
-
-        return out
 
 def get_fits(data, channel='norm', function='biexp'):
     def exp(t, a, td):
@@ -60,7 +22,7 @@ def biexp(t, a1, td1, a2, td2):
 
     fits = {}
     for i in range(len(data)):
-        out = helpers.roi(data[i], {'t':[0]})
+        out = roi(data[i], {'t':[0]})
         fit, cov = curve_fit(functions[function], out['t'][:], out[channel][:], bounds=(0,1000000), maxfev=1000*len(out['t'][:]))
         std = np.sqrt(np.diag(cov))
diff --git a/makeitwright/core/__init__.py b/makeitwright/core/__init__.py
new file mode 100644
index 0000000..60761ac
--- /dev/null
+++ b/makeitwright/core/__init__.py
@@ -0,0 +1,8 @@
+from . import parsers
+from . import artists
+from . import styles
+from . import helpers
+
+from . import spectra
+from . import spectralprofile
+from . import hyperspectral
diff --git a/makeitwright/artists.py b/makeitwright/core/artists.py
similarity index 99%
rename from makeitwright/artists.py
rename to makeitwright/core/artists.py
index 4a52103..7595ac2 100644
--- a/makeitwright/artists.py
+++ b/makeitwright/core/artists.py
@@ -2,9 +2,10 @@
 import WrightTools as wt
 import matplotlib.cm as cm
 from matplotlib import pyplot as plt
-from .process.helpers import roi, parse_args
+from .helpers import roi, parse_args
 from . import styles
+
 
 def plot(data, **kwargs):
     if type(data) is wt.Collection:
         data = [data[key] for key in data]
diff --git a/makeitwright/process/helpers.py b/makeitwright/core/helpers.py
similarity index 100%
rename from makeitwright/process/helpers.py
rename to makeitwright/core/helpers.py
diff --git a/makeitwright/process/hyperspectral.py b/makeitwright/core/hyperspectral.py
similarity index 98%
rename from makeitwright/process/hyperspectral.py
rename to makeitwright/core/hyperspectral.py
index 8d7a5c3..422f251 100644
--- a/makeitwright/process/hyperspectral.py
+++ b/makeitwright/core/hyperspectral.py
@@ -1,6 +1,3 @@
-__name__ = "hyperspectral"
-__author__ = "Chris Roy, Song Jin Research Group, Dept. of Chemistry, University of Wisconsin - Madison"
-
 """
 Processing and plotting methods for 3-dimensional WrightTools data objects
 containing two spatial axes and one non-spatial axis (i.e. a spectral axis).
@@ -11,12 +8,11 @@
 Data axes must be ordered (spatial x, spatial y, non-spatial).
 """
-#import
 import numpy as np
 import matplotlib as mpl
 from matplotlib import pyplot as plt
-from . import helpers
-import makeitwright.styles as styles
+from . import helpers, styles
+
 
 def remove_background(data, channel, threshold=0.1, negative=False, return_mask=False, max_ref_count=10):
     """
diff --git a/makeitwright/process/image.py b/makeitwright/core/image.py
similarity index 98%
rename from makeitwright/process/image.py
rename to makeitwright/core/image.py
index 34684bd..69d599d 100644
--- a/makeitwright/process/image.py
+++ b/makeitwright/core/image.py
@@ -1,8 +1,7 @@
 import numpy as np
 import matplotlib as mpl
 from matplotlib import pyplot as plt
-from . import helpers
-import makeitwright.styles as styles
+from . import helpers, styles
 
 def get_pixel_location(data, pixel):
     """
diff --git a/makeitwright/parsers.py b/makeitwright/core/parsers/__init__.py
similarity index 53%
rename from makeitwright/parsers.py
rename to makeitwright/core/parsers/__init__.py
index 5ffaf6a..0e018fa 100644
--- a/makeitwright/parsers.py
+++ b/makeitwright/core/parsers/__init__.py
@@ -1,14 +1,14 @@
-from psutil import virtual_memory
-from os import listdir
-from os.path import isfile, isdir, getsize
-
-from .process import afm, andor, beckerhickl, horiba, xrd
-try:  # iontof is optional
-    from .process import iontof
-except ImportError:
-    pass
 import WrightTools as wt
-import numpy as np
+import pathlib
+
+from psutil import virtual_memory
+
+from .andor import fromAndorNeo
+from .gwyddion import fromGwyddion_traces
+from .sp130 import fromSP130
+from .horiba import fromLabramHR, horiba_typeID
+from .iontof import fromITA, ITApeaks
+from .xrd import fromBruker
 
 
 def typeID(*fpaths):
    """
    Identify the types of data files found in a given directory.
    The kind will inform on how to correctly import the data.
    """
     types = {}
-    for fpath in fpaths:
-        if '.ita' in fpath:
+    for fpath in map(pathlib.Path, fpaths):
+        if fpath.suffix == '.ita':
             types[fpath] = 'iontof_SIMS'
             print(f"file {fpath} is IonToF SIMS data")
-        if '.txt' in fpath:
+        if fpath.suffix == '.txt':
             with open(fpath) as f:
                 txt = f.read()
             if "LabRAM HR" in txt:
-                if horiba.typeID(fpath) is not None:
-                    types[fpath] = horiba.typeID(fpath)
+                if (htype := horiba_typeID(fpath)) is not None:
+                    types[fpath] = htype
             if "Goniometer" in txt:
                 types[fpath] = 'Bruker_XRD'
             if "[m]" in txt:
                 types[fpath] = 'Gwyddion_traces'
-        if '.asc' in fpath:
+        if fpath.suffix == '.asc':
             with open(fpath) as f:
                 txt = f.read()
             if "*BLOCK" in txt:
@@ -41,28 +41,35 @@
             else:
                 types[fpath] = 'ASCII'
 
-        if '.wt5' in fpath:
+        if fpath.suffix == '.wt5':
             types[fpath] = 'wt5'
 
     print(f"{len(types)} of {len(fpaths)} files identified as valid data types")
     return types
 
 
-def listfiles(fdir, flist=[]):
-    if len(flist) < 1000:
-        dirlist = [f'{fdir}/{d}' for d in listdir(fdir) if isdir(f'{fdir}/{d}')]
-        fpaths = flist+[f'{fdir}/{f}' for f in listdir(fdir) if isfile(f'{fdir}/{f}')]
-
-        if dirlist:
-            for d in dirlist:
-                fpaths = listfiles(d, flist=fpaths)
-
-        return fpaths
-    else:
-        print("Too many files in directory. Process terminated to prevent overflow.")
+def listfiles(fdir:str|pathlib.Path, pattern:str="*") -> list[pathlib.Path]:
+    """Generate a list of filepaths within a directory.
+    Includes files from nested directories.
+
+    Parameters
+    ----------
+    fdir: path-like
+        directory to walk
+    pattern: string
+        pattern used to filter files; the default applies no filter
+    """
+    return [
+        pi for pi in filter(
+            lambda pi: pi.is_file(), pathlib.Path(fdir).rglob(pattern)
+        )
+    ]
 
 
 def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
+    """
+    Parse all supported data files under fdir into WrightTools objects; keywords/exclude filter filenames and objective is forwarded to the Andor parser as the objective lens.
+    """
     files = listfiles(fdir)
     include = [1 for i in range(len(files))]
@@ -81,7 +88,7 @@
                 if x in f:
                     include[i]=0
 
-    files = [file for i, file in enumerate(files) if include[i]]
+    files = [file for i, file in zip(include, files) if i]
     print(f'found {sum(include)} files matching keyword specifications')
 
     ftypes = typeID(*files)
@@ -111,43 +118,43 @@
         too_much_data = True
     if len(ftypes) > 200:
         too_much_data = True
-    if sum([getsize(f) for f in files]) > virtual_memory().available:
+    if sum([f.stat().st_size for f in files]) > virtual_memory().available:
         too_much_data = True
 
-    if not too_much_data:
-        d = []
-        for fpath, dtype in ftypes.items():
-            basename = fpath.split('/')[-1].split('.')[0]
+    if too_much_data:
+        raise MemoryError("too much data in directory, parsing cancelled to prevent memory overflow")
 
-            if dtype.startswith('LabramHR'):
-                d.append(horiba.fromLabramHR(fpath, name=basename))
+    d = []
+    for fpath, dtype in ftypes.items():
+        basename = fpath.stem
 
-            elif dtype=='Bruker_XRD':
-                l0 = len(d)
-                d = d + xrd.fromBruker(fpath)
+        if dtype.startswith('LabramHR'):
+            d.append(fromLabramHR(fpath, name=basename))
 
-            elif dtype=='Gwyddion_traces':
-                d.append(afm.fromGwyddion_traces(fpath, name=None, ID_steps=True))
+        elif dtype=='Bruker_XRD':
+            l0 = len(d)
+            d = d + fromBruker(fpath)
 
-            elif dtype=='iontof_SIMS':
-                d.append((fpath, iontof.ITApeaks(fpath)))
+        elif dtype=='Gwyddion_traces':
+            d.append(fromGwyddion_traces(fpath, name=None, ID_steps=True))
 
-            elif dtype=='TRPL':
-                l0 = len(d)
-                d.append(beckerhickl.fromSP130(fpath, name=basename))
-                print(basename)
+        elif dtype=='iontof_SIMS':
+            d.append((fpath, ITApeaks(fpath)))
 
-            elif dtype=='ASCII':
-                try:
-                    d.append(andor.fromAndorNeo(fpath, name=basename, objective_lens=objective))
-                except:
-                    print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module')
-                    print(basename)
-
-            elif dtype=='wt5':
-                d.append(wt.open(fpath))
-        if len(d)==1:
-            d=d[0]
-        return d
-    else:
-        print("too much data in directory, parsing cancelled to prevent storage overflow")
\ No newline at end of file
+        elif dtype=='TRPL':
+            l0 = len(d)
+            d.append(fromSP130(fpath, name=basename))
+            print(basename)
+
+        elif dtype=='ASCII':
+            try:
+                d.append(fromAndorNeo(fpath, name=basename, objective_lens=objective))
+            except Exception:
+                print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module')
+                print(basename)
+
+        elif dtype=='wt5':
+            d.append(wt.open(fpath))
+
+    if len(d)==1:
+        d=d[0]
+    return d
diff --git a/makeitwright/core/parsers/andor.py b/makeitwright/core/parsers/andor.py
new file mode 100644
index 0000000..53d7140
--- /dev/null
+++ b/makeitwright/core/parsers/andor.py
@@ -0,0 +1,184 @@
+import WrightTools as wt
+import numpy as np
+import pathlib
+from os import fspath
+
+
+def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False):
+    """Create a data object from Andor Solis software (ascii exports).
+
+    Parameters
+    ----------
+    fpath : path-like
+        Path to file (should be .asc format).
+        Can be either a local or remote file (http/ftp).
+        Can be compressed with gz/bz2, decompression based on file name.
+    name : string (optional)
+        Name to give to the created data object. If None, filename is used.
+        Default is None.
+
+    Returns
+    -------
+    data
+        New data object.
+ """ + + objective_lenses = { + '5x-Jin' : 0.893, + '20x-Jin' : 3.52, + '100x-Wright' : 18.2, + '5' : 0.893, + '20' : 3.52, + '100' : 18.2, + 5 : 0.893, + 20 : 3.52, + 100 : 18.2 + } + + # parse filepath + filepath = pathlib.Path(fpath) + + if not ".asc" in filepath.suffixes: + wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc") + # parse name + if name is None: + name = filepath.name.split("/")[-1] + + if objective_lens=='prompt': + objective_lens = input(f'enter magnification for data at {name}: ') + if not objective_lens: + objective_lens = 0 + + # create data + ds = np.DataSource(None) + f = ds.open(fspath(fpath), "rt") + axis0 = [] + arr = [] + attrs = {} + + line0 = f.readline().strip()[:-1] + line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma + axis0.append(line0.pop(0)) + arr.append(line0) + + def get_frames(f, arr, axis0): + axis0_written = False + while True: + line = f.readline().strip()[:-1] + if len(line) == 0: + break + else: + line = [float(x) for x in line.split(",")] + # signature of new frames is restart of axis0 + if not axis0_written and (line[0] == axis0[0]): + axis0_written = True + if axis0_written: + line.pop(0) + else: + axis0.append(line.pop(0)) + arr.append(line) + return arr, axis0 + + arr, axis0 = get_frames(f, arr, axis0) + nframes = len(arr) // len(axis0) + + i = 0 + while i < 3: + line = f.readline().strip() + if len(line) == 0: + i += 1 + else: + try: + key, val = line.split(":", 1) + except ValueError: + pass + else: + attrs[key.strip()] = val.strip() + + f.close() + + #create data object + arr = np.array(arr) + axis0 = np.array(axis0) + data = wt.Data(name=name) + if float(attrs["Grating Groove Density (l/mm)"]) == 0: + xname = 'x' + dtype = 'image' + try: + axis0 = axis0/objective_lenses[objective_lens] + xunits = 'µm' + except KeyError: + xunits = 'px' + else: + xname = 'wl' + xunits = 'nm' + dtype = 'spectralprofile' + + axis1 = np.arange(arr.shape[-1]) + yname='y' + try: + axis1 = axis1/objective_lenses[objective_lens] + yunits = 'µm' + except KeyError: + yunits = 'px' + + axes = [xname, yname] + + if nframes == 1: + arr = np.array(arr) + data.create_variable(name=xname, values=axis0[:, None], units=xunits) + data.create_variable(name=yname, values=axis1[None, :], units=yunits) + else: + frames = np.arange(nframes) + try: + ct = float(attrs["Kinetic Cycle Time (secs)"]) + frames = frames*ct + tname = 't' + tunits = 's' + except KeyError: + tname = 'frame' + tunits = None + arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0])) + data.create_variable(name=tname, values=frames[:, None, None], units=tunits) + data.create_variable(name=xname, values=axis0[None, :, None], units=xunits) + data.create_variable(name=yname, values=axis1[None, None, :], units=yunits) + axes = [tname] + axes + + if xname=='wl': + if xunits=='nm': + data[xname].attrs['label'] = "wavelength (nm)" + if xunits=='wn': + data[xname].attrs['label'] = "wavenumber (cm-1)" + if xname=='x': + data[xname].attrs['label'] = "x (µm)" + if yname=='y': + data[yname].attrs['label'] = "y (µm)" + + data.transform(*axes) + if cps: + try: + arr = arr/float(attrs["Exposure Time (secs)"]) + except KeyError: + pass + try: + arr = arr/int(attrs["Number of Accumulations"]) + except KeyError: + pass + + data.create_channel(name='sig', values=arr, signed=False) + if cps: + data['sig'].attrs['label'] = "intensity (cps)" + else: + data['sig'].attrs['label'] = "counts" + + for key, val in attrs.items(): + data.attrs[key] = val + + # finish + print("data created at 
{0}".format(data.fullpath)) + print(" axes: {0}".format(data.axis_names)) + print(" shape: {0}".format(data.shape)) + data.attrs['dtype']=dtype + + return data + diff --git a/makeitwright/process/afm.py b/makeitwright/core/parsers/gwyddion.py similarity index 96% rename from makeitwright/process/afm.py rename to makeitwright/core/parsers/gwyddion.py index 7a8061b..0891caf 100644 --- a/makeitwright/process/afm.py +++ b/makeitwright/core/parsers/gwyddion.py @@ -1,12 +1,5 @@ -import numpy as np import WrightTools as wt - - -def fromPicoView(filepath, name=None, convert_units=True, flatten_order=0): - """ - under development - """ - raise NotImplementedError +import numpy as np def fromGwyddion_traces(filepath, name=None, convert_units=True, ID_steps=False, flatten=False): @@ -116,4 +109,5 @@ def fromGwyddion_traces(filepath, name=None, convert_units=True, ID_steps=False, if len(data) == 1: data = data[0] - return data \ No newline at end of file + return data + diff --git a/makeitwright/process/horiba.py b/makeitwright/core/parsers/horiba.py similarity index 77% rename from makeitwright/process/horiba.py rename to makeitwright/core/parsers/horiba.py index 2f01b9d..b3a2f7b 100644 --- a/makeitwright/process/horiba.py +++ b/makeitwright/core/parsers/horiba.py @@ -1,70 +1,6 @@ import numpy as np import WrightTools as wt -import makeitwright.styles as styles - -from . import spectralprofile -from . import hyperspectral -from . import helpers - -def central_wavelength(data): - pass - -def plot_image(data, channel, **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - if unit == 'wn': - params.update(styles.image_horiba_Raman) - else: - params.update(styles.image_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_image(data, channel, **params) - else: - spectralprofile.plot_image(data, channel, **params) - -def plot_profile(data, channel, profile_axis='y', **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - - if data.axes[1].natural_name == 't': - params.update(styles.profile_horiba_timed_series) - elif unit == 'wn': - params.update(styles.profile_horiba_Raman) - else: - params.update(styles.profile_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_profile(data, profile_axis, channel, **params) - else: - spectralprofile.plot_profile(data, channel, **params) - -def plot_decomposition(data, channel, **kwargs): - params = {} - try: - unit = data['wl'].units - except KeyError: - unit = data.constants[0].units - if unit == 'wn': - params.update(styles.decomposition_horiba_Raman) - else: - params.update(styles.decomposition_horiba_PL) - params.update(**kwargs) - - if len(data.axes) == 3: - hyperspectral.plot_decomposition(data, 0, 1, 2, channel, **params) - else: - spectralprofile.plot_decomposition(data, 0, 1, channel, **params) - -def fromAramis(filepath): - print("not ready yet, get to work :)") +from ..helpers import norm def horiba_typeID(filepath): @@ -157,7 +93,7 @@ def fromLabramHR(filepath, name=None, cps=False): d['wl'].label = spectlabels[spectral_units] d.create_channel('sig', values=sig) d['sig'].label = siglabels[spectral_units] - d.create_channel('norm', values=helpers.norm(sig, 0, 1)) + d.create_channel('norm', values=norm(sig, 0, 1)) d['norm'].label = 'norm. 
         d.transform('wl')
         d.attrs['dtype'] = 'spectrum'
@@ -187,7 +123,7 @@
             spect['wl'].label = spectlabels[spectral_units]
             spect.create_channel(name='sig', values=sig_i)
             spect['sig'].label = siglabels[spectral_units]
-            spect.create_channel(name='norm', values=helpers.norm(sig_i, 0, 1))
+            spect.create_channel(name='norm', values=norm(sig_i, 0, 1))
             spect['norm'].label = 'norm. ' + siglabels[spectral_units].split(' (')[0]
             spect.transform('wl')
             spect.attrs['dtype'] = 'spectrum'
@@ -253,4 +189,9 @@
     return d
 
 def fromLabramHRTimedSeries(filedir):
-    pass
\ No newline at end of file
+    raise NotImplementedError
+
+def fromAramis(filepath):
+    raise NotImplementedError
+
+
diff --git a/makeitwright/core/parsers/iontof.py b/makeitwright/core/parsers/iontof.py
new file mode 100644
index 0000000..0aaa791
--- /dev/null
+++ b/makeitwright/core/parsers/iontof.py
@@ -0,0 +1,80 @@
+try:
+    import pySPM
+except ImportError:
+    pass
+import numpy as np
+import WrightTools as wt
+
+
+def open_ita(fpath):
+    try:
+        ita = pySPM.ITA(fpath)
+    except NameError:
+        # pySPM never imported above, so the name is undefined here
+        raise ImportError(
+            "ionTOF support is optional and was not specified at install; "
+            "to work with iontof data, please install the optional dependencies: "
+            'pip install "makeitwright[iontof] @ git+https://github.com/wright-group/makeitwright.git"'
+        )
+    return ita
+
+
+def fromITA(fpath, name=None, select_channels=None):
+
+    ita = open_ita(fpath)
+    ita.show_summary()
+    summ = ita.get_summary()
+
+    xarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['x'])
+    yarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['y'])
+    scarr = np.linspace(1, int(summ['Scans']), num=int(summ['Scans']))
+    charrs = {}
+    if select_channels is not None:
+        idxs = []
+        for peak in summ['peaks']:
+            if peak['id'] in select_channels or peak['assign'] in select_channels:
+                idxs = idxs + [peak['id']]
+        for idx in idxs:
+            if summ['peaks'][idx]['assign']:
+                chname = summ['peaks'][idx]['assign']
+            elif summ['peaks'][idx]['desc']:
+                chname = summ['peaks'][idx]['desc']
+            else:
+                chname = str(int(summ['peaks'][idx]['cmass'])) + 'mz'
+            charr = ita.getImage(idx,0)
+            for i in range(1,len(scarr)):
+                j = ita.getImage(idx,i)
+                charr = np.dstack((charr,j))
+            charrs[chname] = charr
+            print("channel <" + chname + "> found")
+    else:
+        for peak in summ['peaks']:
+            if peak['assign']:
+                chname = peak['assign']
+            elif peak['desc']:
+                chname = peak['desc']
+            else:
+                chname = str(int(peak['cmass'])) + 'mz'
+            idx = peak['id']
+            charr = ita.getImage(idx,0)
+            for i in range(1,len(scarr)):
+                j = ita.getImage(idx,i)
+                charr = np.dstack((charr,j))
+            charrs[chname] = charr
+            print("channel <" + chname + "> found")
+
+    d = wt.Data()
+    d.create_variable(name='x', values=xarr[:,None,None], units='um')
+    d.create_variable(name='y', values=yarr[None,:,None], units='um')
+    d.create_variable(name='scan', values=scarr[None,None,:], units='s')
+    for chname, charr in charrs.items():
+        d.create_channel(name=chname, values=charr)
+    d.transform('x','y','scan')
+
+    return d
+
+
+def ITApeaks(fpath):
+    ita = open_ita(fpath)
+    summ = ita.get_summary()
+    return summ['peaks']
diff --git a/makeitwright/core/parsers/sp130.py b/makeitwright/core/parsers/sp130.py
new file mode 100644
index 0000000..3912ec4
--- /dev/null
+++ b/makeitwright/core/parsers/sp130.py
@@ -0,0 +1,35 @@
+import pathlib
+
+import numpy as np
+import WrightTools as wt
+from ..helpers import norm
+
+
+def fromSP130(fpath, name=None):
+    fpath = pathlib.Path(fpath)
+    if fpath.suffix != '.asc':
+        print(f"filetype {fpath.suffix} not supported")
+    else:
+        with open(fpath) as f:
+            txt = f.readlines()
+        header_size = 0
+        for i, line in enumerate(txt):
+            if 'Title' in line.split() and name is None:
+                name = line.split()[-1]
+            if '*BLOCK' in line:
+                header_size = i+1
+
+        arr = np.genfromtxt(fpath, delimiter=',', skip_header=header_size, skip_footer=1)
+        t = arr[:,0]
+        sig = arr[:,1]
+        t = t-t[np.argmax(sig)]
+
+        out = wt.Data(name=name)
+        out.create_variable('t', values=t, units='ns')
+        out['t'].attrs['label'] = "time (ns)"
+        out.create_channel('sig', values=sig)
+        out['sig'].attrs['label'] = "PL counts"
+        out.transform('t')
+        out.create_channel('norm', values=norm(out['sig'][:], 0.01, 1))
+        out['norm'].attrs['label'] = "norm. PL counts"
+
+        return out
+
diff --git a/makeitwright/core/parsers/xrd.py b/makeitwright/core/parsers/xrd.py
new file mode 100644
index 0000000..46d083d
--- /dev/null
+++ b/makeitwright/core/parsers/xrd.py
@@ -0,0 +1,48 @@
+import pathlib
+import numpy as np
+import WrightTools as wt
+from ..helpers import norm
+
+
+def fromBruker(*filepaths):
+    d = []
+    for filepath in filepaths:
+        dtype = "Locked Coupled"
+        header_size=None
+        with open(filepath) as f:
+            txt = f.readlines()
+        for i, line in enumerate(txt):
+            if "ScanType" in line:
+                dtype = line.split('=')[-1].strip()
+            if "[Data]" in line:
+                header_size = i+2
+        if header_size is None:
+            try:
+                arr = np.genfromtxt(filepath, skip_header=166, delimiter=',')
+                print("Data header was not identified in file. Data in instance may not reflect complete file information.")
+            except Exception:
+                print("Unable to read data from file due to lack of expected data header.")
+        else:
+            arr = np.genfromtxt(filepath, skip_header=header_size, delimiter=',')
+
+        if arr.size > 0:
+            deg_arr = arr[:,0].flatten()
+            ch_arr = arr[:,1].flatten()
+            pat = wt.Data(name=pathlib.Path(filepath).name)
+            pat.create_channel('sig', values=ch_arr)
+            pat.create_channel('norm', values=norm(ch_arr, 1, 100))
+            pat.create_channel('log', values=np.log(norm(ch_arr, 1, 100)))
+            if dtype=="Locked Coupled":
+                pat.create_variable('ang', values=deg_arr, units='deg')
+                pat.transform('ang')
+                pat.attrs['acquisition'] = 'XRD_2theta'
+            if dtype=="Z-Drive":
+                pat.create_variable('z', values=deg_arr, units='mm')
+                pat.transform('z')
+                pat.attrs['acquisition'] = 'XRD_2theta'
+            pat.attrs['dtype'] = 'spectrum'
+            d.append(pat)
+        else:
+            print(f'file {filepath} was loaded but had no values')
+
+    return d
+
diff --git a/makeitwright/spectra.py b/makeitwright/core/spectra.py
similarity index 99%
rename from makeitwright/spectra.py
rename to makeitwright/core/spectra.py
index 0b70b76..c05f01e 100644
--- a/makeitwright/spectra.py
+++ b/makeitwright/core/spectra.py
@@ -3,8 +3,7 @@
 import numpy as np
 import WrightTools as wt
 from matplotlib import pyplot as plt
-from .process import helpers
-from . import styles
+from . import helpers, styles
 
 
 def plot_spectra(data, **kwargs):
diff --git a/makeitwright/process/spectralprofile.py b/makeitwright/core/spectralprofile.py
similarity index 99%
rename from makeitwright/process/spectralprofile.py
rename to makeitwright/core/spectralprofile.py
index 17606c0..fbf5fda 100644
--- a/makeitwright/process/spectralprofile.py
+++ b/makeitwright/core/spectralprofile.py
@@ -4,8 +4,7 @@
 import numpy as np
 import matplotlib as mpl
 from matplotlib import pyplot as plt
-from . import helpers
-import makeitwright.styles as styles
+from . import helpers, styles
 
 
 def remove_spectral_background(data, channel, spatial_reference_range, name=None, create_background_channel=False, talkback=True):
diff --git a/makeitwright/styles.py b/makeitwright/core/styles.py
similarity index 100%
rename from makeitwright/styles.py
rename to makeitwright/core/styles.py
diff --git a/makeitwright/horiba.py b/makeitwright/horiba.py
new file mode 100644
index 0000000..322a23e
--- /dev/null
+++ b/makeitwright/horiba.py
@@ -0,0 +1,62 @@
+from .core import spectralprofile, hyperspectral, styles
+
+
+def central_wavelength(data):
+    raise NotImplementedError
+
+
+def plot_image(data, channel, **kwargs):
+    params = {}
+    try:
+        unit = data['wl'].units
+    except KeyError:
+        unit = data.constants[0].units
+    if unit == 'wn':
+        params.update(styles.image_horiba_Raman)
+    else:
+        params.update(styles.image_horiba_PL)
+    params.update(**kwargs)
+
+    if len(data.axes) == 3:
+        hyperspectral.plot_image(data, channel, **params)
+    else:
+        spectralprofile.plot_image(data, channel, **params)
+
+
+def plot_profile(data, channel, profile_axis='y', **kwargs):
+    params = {}
+    try:
+        unit = data['wl'].units
+    except KeyError:
+        unit = data.constants[0].units
+
+    if data.axes[1].natural_name == 't':
+        params.update(styles.profile_horiba_timed_series)
+    elif unit == 'wn':
+        params.update(styles.profile_horiba_Raman)
+    else:
+        params.update(styles.profile_horiba_PL)
+    params.update(**kwargs)
+
+    if len(data.axes) == 3:
+        hyperspectral.plot_profile(data, profile_axis, channel, **params)
+    else:
+        spectralprofile.plot_profile(data, channel, **params)
+
+
+def plot_decomposition(data, channel, **kwargs):
+    params = {}
+    try:
+        unit = data['wl'].units
+    except KeyError:
+        unit = data.constants[0].units
+    if unit == 'wn':
+        params.update(styles.decomposition_horiba_Raman)
+    else:
+        params.update(styles.decomposition_horiba_PL)
+    params.update(**kwargs)
+
+    if len(data.axes) == 3:
+        hyperspectral.plot_decomposition(data, 0, 1, 2, channel, **params)
+    else:
+        spectralprofile.plot_decomposition(data, 0, 1, channel, **params)
diff --git a/makeitwright/process/iontof.py b/makeitwright/iontof.py
similarity index 50%
rename from makeitwright/process/iontof.py
rename to makeitwright/iontof.py
index e997070..cd5794b 100644
--- a/makeitwright/process/iontof.py
+++ b/makeitwright/iontof.py
@@ -1,9 +1,7 @@
 import numpy as np
 import cmocean
-import pySPM
-import WrightTools as wt
-from . import hyperspectral, styles
-from . import helpers
+from .core import hyperspectral, styles, helpers
 
 
 def relative_proportion(data, channel0, channel1):
     """
@@ -47,6 +45,7 @@
     data.create_channel(ch_name, values=ch_arr, verbose=True)
     data[ch_name].signed = True
 
+
 def plot_image(data, channel, **kwargs):
 
     params = {}
@@ -57,6 +56,7 @@
 
     hyperspectral.plot_image(data, channel, **params)
 
+
 def plot_profile(data, profile_axis, channel, **kwargs):
 
     params = {}
@@ -67,6 +67,7 @@
 
     hyperspectral.plot_profile(data, profile_axis, channel, **params)
 
+
 def plot_depth_trace(data, channel, **kwargs):
 
     params = {}
@@ -75,61 +76,3 @@
 
     hyperspectral.plot_decomposition(data, 'x', 'y', 'scan', channel, **kwargs)
 
-
-def ITApeaks(fpath):
-    ita=pySPM.ITA(fpath)
-    summ = ita.get_summary()
-    return summ['peaks']
-
-def fromITA(fpath, name=None, select_channels=None):
-
-    ita = pySPM.ITA(fpath)
-    ita.show_summary()
-    summ = ita.get_summary()
-
-    xarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['x'])
-    yarr = np.linspace(0, summ['fov']*1e6, num=summ['pixels']['y'])
-    scarr = np.linspace(1, int(summ['Scans']), num=int(summ['Scans']))
-    charrs = {}
-    if select_channels is not None:
-        idxs = []
-        for peak in summ['peaks']:
-            if peak['id'] in select_channels or peak['assign'] in select_channels:
-                idxs = idxs + [peak['id']]
-        for idx in idxs:
-            if summ['peaks'][idx]['assign']:
-                chname = summ['peaks'][idx]['assign']
-            elif summ['peaks'][idx]['desc']:
-                chname = summ['peaks'][idx]['desc']
-            else:
-                chname = str(int(summ['peaks'][idx]['cmass'])) + 'mz'
-            charr = ita.getImage(idx,0)
-            for i in range(1,len(scarr)):
-                j = ita.getImage(idx,i)
-                charr = np.dstack((charr,j))
-            charrs[chname] = charr
-            print("channel <" + chname + "> found")
-    else:
-        for peak in summ['peaks']:
-            if peak['assign']:
-                chname = peak['assign']
-            elif peak['desc']:
-                chname = peak['desc']
-            else:
-                chname = str(int(peak['cmass'])) + 'mz'
-            idx = peak['id']
-            charr = ita.getImage(idx,0)
-            for i in range(1,len(scarr)):
-                j = ita.getImage(idx,i)
-                charr = np.dstack((charr,j))
-            charrs[chname] = charr
-            print("channel <" + chname + "> found")
-
-    d = wt.Data()
-    d.create_variable(name='x', values=xarr[:,None,None], units='um')
-    d.create_variable(name='y', values=yarr[None,:,None], units='um')
-    d.create_variable(name='scan', values=scarr[None,None,:], units='s')
-    for chname, charr in charrs.items():
-        d.create_channel(name=chname, values=charr)
-    d.transform('x','y','scan')
-
-    return d
\ No newline at end of file
diff --git a/makeitwright/process/__init__.py b/makeitwright/process/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/makeitwright/process/transmittance_references/Ag-P01.csv b/makeitwright/transmittance_references/Ag-P01.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/Ag-P01.csv
rename to makeitwright/transmittance_references/Ag-P01.csv
diff --git a/makeitwright/process/transmittance_references/Ag-P02.csv b/makeitwright/transmittance_references/Ag-P02.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/Ag-P02.csv
rename to makeitwright/transmittance_references/Ag-P02.csv
diff --git a/makeitwright/process/transmittance_references/BK7.csv b/makeitwright/transmittance_references/BK7.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/BK7.csv
rename to makeitwright/transmittance_references/BK7.csv
diff --git a/makeitwright/process/transmittance_references/CaF2.csv b/makeitwright/transmittance_references/CaF2.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/CaF2.csv
rename to makeitwright/transmittance_references/CaF2.csv
diff --git a/makeitwright/process/transmittance_references/MgF2.csv b/makeitwright/transmittance_references/MgF2.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/MgF2.csv
rename to makeitwright/transmittance_references/MgF2.csv
diff --git a/makeitwright/process/transmittance_references/UVAl.csv b/makeitwright/transmittance_references/UVAl.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/UVAl.csv
rename to makeitwright/transmittance_references/UVAl.csv
diff --git a/makeitwright/process/transmittance_references/UVFS.csv b/makeitwright/transmittance_references/UVFS.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/UVFS.csv
rename to makeitwright/transmittance_references/UVFS.csv
diff --git a/makeitwright/process/transmittance_references/sapphire.csv b/makeitwright/transmittance_references/sapphire.csv
similarity index 100%
rename from makeitwright/process/transmittance_references/sapphire.csv
rename to makeitwright/transmittance_references/sapphire.csv
diff --git a/makeitwright/process/xrd.py b/makeitwright/xrd.py
similarity index 59%
rename from makeitwright/process/xrd.py
rename to makeitwright/xrd.py
index 681528d..518c87e 100644
--- a/makeitwright/process/xrd.py
+++ b/makeitwright/xrd.py
@@ -3,61 +3,15 @@
 from scipy.optimize import curve_fit
 from scipy.stats import pearsonr
 import WrightTools as wt
-import makeitwright.spectra as spectra, styles
-from .helpers import norm, roi
+from .core import spectra, styles
+from .core.helpers import norm, roi
 
-pi = np.pi
-
-
-def fromBruker(*filepaths):
-    d = []
-    for filepath in filepaths:
-        dtype = "Locked Coupled"
-        header_size=None
-        with open(filepath) as f:
-            txt = f.readlines()
-        for i, line in enumerate(txt):
-            if "ScanType" in line:
-                dtype = line.split('=')[-1].strip()
-            if "[Data]" in line:
-                header_size = i+2
-        if header_size is None:
-            try:
-                arr = np.genfromtxt(filepath, skip_header=166, delimiter=',')
-                print("Data header was not identified in file. Data in instance may not reflect complete file information.")
-            except:
-                print("Unable to read data from file due to lack of expected data header.")
-        else:
-            arr = np.genfromtxt(filepath, skip_header=header_size, delimiter=',')
-
-        if arr.size > 0:
-            deg_arr = arr[:,0].flatten()
-            ch_arr = arr[:,1].flatten()
-            pat = wt.Data(name=filepath.split('/')[-1])
-            pat.create_channel('sig', values=ch_arr)
-            pat.create_channel('norm', values=norm(ch_arr, 1, 100))
-            pat.create_channel('log', values=np.log(norm(ch_arr, 1, 100)))
-            if dtype=="Locked Coupled":
-                pat.create_variable('ang', values=deg_arr, units='deg')
-                pat.transform('ang')
-                pat.attrs['acquisition'] = 'XRD_2theta'
-            if dtype=="Z-Drive":
-                pat.create_variable('z', values=deg_arr, units='mm')
-                pat.transform('z')
-                pat.attrs['acquisition'] = 'XRD_2theta'
-            pat.attrs['dtype'] = 'spectrum'
-            d.append(pat)
-        else:
-            print(f'file {filepath} was loaded but had no values')
-
-    return d
-
 
 def get_fits(data, channel='norm', function='gauss', xrange='all'):
     def gauss(x, a, u, s):
         return a*np.exp(-((x-u)/(2*s))**2)
     def cauchy(x, a, u, s):
-        return a/(pi*s*(1+((x-u)/s)**2))
+        return a/(np.pi*s*(1+((x-u)/s)**2))
 
     functions = {
         'gauss' : gauss,
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..f7a5929
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,37 @@
+[build-system]
+requires = ["flit_core >= 3.12.0, <4"]
+build-backend = "flit_core.buildapi"
+
+[project]
+name = "makeitwright"
+description = "Plotting and parsing tools for the Jin Group."
+authors = [{name="Chris Roy"}]
+maintainers = [{name="Dan Kohler"}]
+dynamic = ["version"]
+requires-python = ">=3.10"
+readme = "README.md"
+dependencies = [
+    "psutil",
+    "wrighttools",
+    "cmocean",
+    "numpy",
+    "matplotlib",
+    "scipy",
+]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
+    "Natural Language :: English",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.10",
+    "Topic :: Scientific/Engineering",
+]
+
+[project.optional-dependencies]
+iontof = ["pySPM"]
+
+[tool.pytest.ini_options]
+testpaths = [
+    "tests",
+]
\ No newline at end of file
diff --git a/tests/test_import.py b/tests/test_import.py
new file mode 100644
index 0000000..0c27ebe
--- /dev/null
+++ b/tests/test_import.py
@@ -0,0 +1,5 @@
+import makeitwright as mw
+
+
+def test_import():
+    mw.helpers.roi
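---

Since the README's Examples section is still TODO, here is a minimal usage sketch of the reorganized top-level API this diff introduces (`mw.parsers.parse`, `mw.helpers.roi`, `mw.spectra.plot_spectra`). It mirrors examples/PLProcessing.py; the data directory and keyword below are hypothetical placeholders, not files shipped with the repository:

# quickstart sketch for the post-refactor layout (hypothetical paths and keyword)
import pathlib

import makeitwright as mw

# hypothetical folder of Andor Solis .asc exports
data_dir = pathlib.Path.home() / "Desktop" / "Research Data" / "test"

# parse() walks the directory, identifies each file's type, and returns
# WrightTools Data objects (unwrapped to a single object if only one matches);
# objective='20' selects the 20x calibration in the Andor parser's lookup table
data = mw.parsers.parse(data_dir, objective='20', keywords='PL')

# average a spatial band of the spectral profile, then plot the spectrum
spectrum = mw.helpers.roi(data, {'y': ([1020, 1070], 'average')})
mw.spectra.plot_spectra(spectrum, channel=0, xrange=[500, 850])

Note that the aliasing used in the example scripts (`roi = mw.helpers.roi`, etc.) is optional once everything is importable from the single `makeitwright` namespace.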