Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 8 additions & 9 deletions examples/PLProcessing.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,27 @@

# Process PL Data from Wright group

import pathlib
import makeitwright as mw

from makeitwright import datasets

andor = mw.andor
roi = mw.helpers.roi
parse = mw.parsers.parse
plot = mw.spectra.plot_spectra

p = datasets.PL
filepath = p.parent
filename = p.stem

filepath = pathlib.Path().expanduser() / "Desktop" / "Research Data" / "Wright Table" / "Original" / "test"
filename = "PEAPbI on FPEASnI PL 77K 4 2 hour wait for cool"
obj = 10 # Objective magnification (5x, 10x, 50x, 100x)
ROI_lower = 1000 # Lower and upper bounds of ROI
ROI_upper = 1047
obj = 10 # Objective magnification (5x, 10x, 50x, 100x)
ROI_lower = 575 # Lower and upper bounds of ROI
ROI_upper = 600
plotx_lower = 500 # Lower and upper bounds of built-in plot x-axis
plotx_upper = 800


# Read data
data = parse(filepath, objective=obj, keywords=filename + ".asc")
data = parse(filepath, objective=str(obj), keywords=filename + ".asc")


# Check object area
Expand All @@ -34,7 +34,6 @@
if con == '1':
quit()


# Process PL data
PL_ROI = roi(data, {'y': ([ROI_lower, ROI_upper], 'average')})
plot(PL_ROI, channel=0, xrange=[plotx_lower, plotx_upper]) # Can add vrange=[ , ] (y axis scale)
Expand Down
25 changes: 19 additions & 6 deletions makeitwright/core/parsers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,18 @@
from .xrd import fromBruker


# Pixel-to-micron scale factors for the supported objective lenses.
# Each objective is reachable under two keys: the bare magnification
# string ('5') and the descriptive label ('5x-Jin'), so callers can
# pass either form directly.
_OBJECTIVE_SCALES = (
    ('5', '5x-Jin', 0.893),
    ('20', '20x-Jin', 3.52),
    ('100', '100x-Wright', 18.2),
)
px_per_um = {}
for _mag, _label, _scale in _OBJECTIVE_SCALES:
    px_per_um[_mag] = _scale
    px_per_um[_label] = _scale
# add 10x for now with approximation
px_per_um["10"] = px_per_um["10x-Jin"] = 2 * px_per_um["5"]


def typeID(*fpaths):
"""
Infer what kind of data the file contains.
Expand Down Expand Up @@ -78,14 +90,14 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
keywords = [keywords]
for kw in keywords:
for i, f in enumerate(files):
if kw not in f:
if kw not in str(f):
include[i]=0
if exclude:
if type(exclude) is not list:
exclude = [exclude]
for x in exclude:
for i, f in enumerate(files):
if x in f:
if x in str(f):
include[i]=0

files = [file for i, file in zip(include, files) if i]
Expand Down Expand Up @@ -118,15 +130,15 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):
too_much_data = True
if len(ftypes) > 200:
too_much_data = True
if sum([f.state()["st_size"] for f in files]) > virtual_memory().available:
if sum([f.stat().st_size for f in files]) > virtual_memory().available:
too_much_data = True

if too_much_data:
raise MemoryError("too much data in directory, parsing cancelled to prevent storage overflow")

d = []
for fpath, dtype in ftypes.items():
basename = fpath.split('/')[-1].split('.')[0]
basename = fpath.stem

if dtype.startswith('LabramHR'):
d.append(fromLabramHR(fpath, name=basename))
Expand All @@ -148,9 +160,10 @@ def parse(fdir, objective, select_types=None, keywords:list|str=[], exclude=[]):

elif dtype=='ASCII':
try:
d.append(fromAndorNeo(fpath, name=basename, objective_lens=objective))
except:
d.append(fromAndorNeo(fpath, name=basename, px_per_um=px_per_um[objective] if objective else None))
except Exception as e:
print(f'attempted to extract ASCII data from path <{fpath}> but it was not recognized by the andor module')
raise e
print(basename)

elif dtype=='wt5':
Expand Down
177 changes: 22 additions & 155 deletions makeitwright/core/parsers/andor.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import WrightTools as wt
import numpy as np
import pathlib
from os import fspath


def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False):
# builtin conversion from pixels to um (from somebody's records)
# for different objectives
# roughly, pixel size in microns / magnification

def fromAndorNeo(fpath, name=None, px_per_um=None):
"""Create a data object from Andor Solis software (ascii exports).

Parameters
Expand All @@ -16,169 +17,35 @@ def fromAndorNeo(fpath, name=None, objective_lens='prompt', cps=False):
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
px_per_um : float-like (optional)
if present, camera spatial dimensions will be mapped in micron units.
if not present, spatial variables of camera will be a unitless index

Returns
-------
data
New data object.
"""

objective_lenses = {
'5x-Jin' : 0.893,
'20x-Jin' : 3.52,
'100x-Wright' : 18.2,
'5' : 0.893,
'20' : 3.52,
'100' : 18.2,
5 : 0.893,
20 : 3.52,
100 : 18.2
}

# parse filepath
filepath = pathlib.Path(fpath)

if not ".asc" in filepath.suffixes:
wt.exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if name is None:
name = filepath.name.split("/")[-1]
data:wt.Data = wt.data.from_Solis(fpath, name=name, verbose=True)
data.rename_variables(xindex="x", yindex="y", wm="wl")
data.rename_channels(signal="sig")

if objective_lens=='prompt':
objective_lens = input(f'enter magnification for data at {name}: ')
if not objective_lens:
objective_lens = 0
for var in {"x", "y"} & set(data.variable_names):
if px_per_um:
data[var][:] = data[var][:] / px_per_um
data[var].units = 'µm'

# create data
ds = np.DataSource(None)
f = ds.open(fspath(fpath), "rt")
axis0 = []
arr = []
attrs = {}
dtype = "image" if "x" in data.variable_names else "spectralprofile"
data.attrs.update(dtype=dtype)

line0 = f.readline().strip()[:-1]
line0 = [float(x) for x in line0.split(",")] # TODO: robust to space, tab, comma
axis0.append(line0.pop(0))
arr.append(line0)

def get_frames(f, arr, axis0):
axis0_written = False
while True:
line = f.readline().strip()[:-1]
if len(line) == 0:
break
else:
line = [float(x) for x in line.split(",")]
# signature of new frames is restart of axis0
if not axis0_written and (line[0] == axis0[0]):
axis0_written = True
if axis0_written:
line.pop(0)
else:
axis0.append(line.pop(0))
arr.append(line)
return arr, axis0
if "wl" in data.variable_names:
data["wl"].attrs['label'] = "wavelength (nm)" if data["wl"].units == "nm" else "wavenumber (cm-1)"

arr, axis0 = get_frames(f, arr, axis0)
nframes = len(arr) // len(axis0)

i = 0
while i < 3:
line = f.readline().strip()
if len(line) == 0:
i += 1
else:
try:
key, val = line.split(":", 1)
except ValueError:
pass
else:
attrs[key.strip()] = val.strip()

f.close()

#create data object
arr = np.array(arr)
axis0 = np.array(axis0)
data = wt.Data(name=name)
if float(attrs["Grating Groove Density (l/mm)"]) == 0:
xname = 'x'
dtype = 'image'
try:
axis0 = axis0/objective_lenses[objective_lens]
xunits = 'µm'
except KeyError:
xunits = 'px'
if data.sig.units == "Hz":
data.sig.label = "intensity (cps)"
else:
xname = 'wl'
xunits = 'nm'
dtype = 'spectralprofile'

axis1 = np.arange(arr.shape[-1])
yname='y'
try:
axis1 = axis1/objective_lenses[objective_lens]
yunits = 'µm'
except KeyError:
yunits = 'px'

axes = [xname, yname]
data.sig.label = "counts"

if nframes == 1:
arr = np.array(arr)
data.create_variable(name=xname, values=axis0[:, None], units=xunits)
data.create_variable(name=yname, values=axis1[None, :], units=yunits)
else:
frames = np.arange(nframes)
try:
ct = float(attrs["Kinetic Cycle Time (secs)"])
frames = frames*ct
tname = 't'
tunits = 's'
except KeyError:
tname = 'frame'
tunits = None
arr = np.array(arr).reshape(nframes, len(axis0), len(arr[0]))
data.create_variable(name=tname, values=frames[:, None, None], units=tunits)
data.create_variable(name=xname, values=axis0[None, :, None], units=xunits)
data.create_variable(name=yname, values=axis1[None, None, :], units=yunits)
axes = [tname] + axes

if xname=='wl':
if xunits=='nm':
data[xname].attrs['label'] = "wavelength (nm)"
if xunits=='wn':
data[xname].attrs['label'] = "wavenumber (cm-1)"
if xname=='x':
data[xname].attrs['label'] = "x (µm)"
if yname=='y':
data[yname].attrs['label'] = "y (µm)"

data.transform(*axes)
if cps:
try:
arr = arr/float(attrs["Exposure Time (secs)"])
except KeyError:
pass
try:
arr = arr/int(attrs["Number of Accumulations"])
except KeyError:
pass

data.create_channel(name='sig', values=arr, signed=False)
if cps:
data['sig'].attrs['label'] = "intensity (cps)"
else:
data['sig'].attrs['label'] = "counts"

for key, val in attrs.items():
data.attrs[key] = val

# finish
print("data created at {0}".format(data.fullpath))
print(" axes: {0}".format(data.axis_names))
print(" shape: {0}".format(data.shape))
data.attrs['dtype']=dtype

return data

Loading