Skip to content

Commit

Permalink
ref: migrate a lot of code to afmformats
Browse files Browse the repository at this point in the history
  • Loading branch information
paulmueller committed Jun 28, 2021
1 parent cb9d46c commit c96eb64
Show file tree
Hide file tree
Showing 14 changed files with 135 additions and 366 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
1.7.5
- ref: migrate `QMap` and `Group` code to afmformats 0.14.0
- ref: `Indentation` is now a subclass of `afmformats.AFMForceDistance`
1.7.4
- enh: allow passing metadata to the IndentationGroup initializer
- setup: bump afmformats from 0.10.2 to 0.13.2
Expand Down
2 changes: 1 addition & 1 deletion nanite/cli/rating.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ def fit_perform(path, path_results, profile_path=PROFILE_PATH):
with ptsv.open(mode="w") as ts:
ts.write(header + "\n")
# get all files in path
datapaths = afmformats.find_data(path, mode="force-distance")
datapaths = afmformats.find_data(path, modality="force-distance")
with tifffile.TiffWriter(fspath(ptif), imagej=True) as tf, \
ptsv.open(mode="a") as ts:
for pp in datapaths:
Expand Down
103 changes: 19 additions & 84 deletions nanite/group.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import pathlib

import afmformats

from .indent import Indentation
from .read import load_data

Expand All @@ -25,103 +27,36 @@ def load_group(path, callback=None, meta_override=None):
Indentation group with force-distance data
"""
path = pathlib.Path(path)
data = load_data(path, callback=callback, meta_override=meta_override)
data = load_data(path,
callback=callback,
meta_override=meta_override,
)
grp = IndentationGroup()
for dd in data:
grp.append(Indentation(dd))
grp += data
grp.path = path
return grp


class IndentationGroup(object):
def __init__(self, path=None, callback=None, meta_override=None):
class IndentationGroup(afmformats.AFMGroup):
def __init__(self, path=None, meta_override=None, callback=None):
"""Group of Indentation
Parameters
----------
path: str or pathlib.Path or None
The path to the data file. The data format is determined
and the file is loaded using :ref:`afmformats:index`.
meta_override: dict
if specified, contains key-value pairs of metadata that
should be used when loading the files
(see :data:`afmformats.meta.META_FIELDS`)
callback: callable or None
A method that accepts a float between 0 and 1
to externally track the process of loading the data.
"""
if path is not None:
path = pathlib.Path(path)
self._mmlist = []

if path is not None:
self += load_group(path,
callback=callback,
meta_override=meta_override)

self.path = path

def __add__(self, grp):
out = IndentationGroup()
out._mmlist = self._mmlist + grp._mmlist
return out

def __iadd__(self, grp):
self._mmlist += grp._mmlist
self.path = None
return self

def __iter__(self):
return iter(self._mmlist)

def __getitem__(self, idx):
return self._mmlist[idx]

def __len__(self):
return len(self._mmlist)

def __repr__(self):
rep = ["IndentationGroup: '{}'".format(self.path)]
for idnt in self._mmlist:
rep.append("- {}".format(idnt))
return "\n".join(rep)

def get_enum(self, enum):
"""Return the indentation curve with this enum value
Raises
------
ValueError if multiple curves with the same enum value exist.
KeyError if the enum value is not found
"""
curves = []
for item in self._mmlist:
if item.enum == enum:
curves.append(item)
if len(curves) == 0:
raise KeyError("Could not find dataset with enum {}".format(enum))
elif len(curves) == 1:
return curves[0]
else:
raise ValueError("Multiple curves with the same enum value exist!")

def append(self, item):
"""Append an indentation dataset
Parameters
----------
item: nanite.indent.Indentation
Force-indentation dataset
"""
if not isinstance(item, Indentation):
raise ValueError("`item` must be an instance of `Indentation`!")
self._mmlist.append(item)

def index(self, item):
return self._mmlist.index(item)

def subgroup_with_path(self, path):
"""Return a subgroup with measurements matching `path`"""
path = pathlib.Path(path)
subgroup = IndentationGroup()
for idnt in self:
if pathlib.Path(idnt.path).resolve() == path.resolve():
subgroup.append(idnt)
subgroup.path = path
return subgroup
super(IndentationGroup, self).__init__(
path=path,
meta_override=meta_override,
callback=callback,
data_classes_by_modality={"force-distance": Indentation}
)
65 changes: 24 additions & 41 deletions nanite/indent.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from collections import OrderedDict
import copy
import inspect
import warnings

import afmformats
import lmfit
import numpy as np
import scipy.signal as spsig
Expand All @@ -13,21 +15,12 @@
from .rate import get_rater


class Indentation(object):
def __init__(self, idnt_data):
"""Force-indentation
Parameters
----------
idnt_data: nanite.read.IndentationData
Object holding the experimental data
"""
self.metadata = idnt_data.metadata
self.path = idnt_data.path
self.enum = idnt_data.enum

#: All data as afmformats.AFMForceDistance
self.data = idnt_data
class Indentation(afmformats.AFMForceDistance):
def __init__(self, data, metadata, diskcache=None):
"""Additional functionalities for afmformats.AFMForceDistance"""
super(Indentation, self).__init__(data=data,
metadata=metadata,
diskcache=diskcache)
#: Default preprocessing steps,
#: see :func:`Indentation.apply_preprocessing`.
self.preprocessing = []
Expand All @@ -39,26 +32,16 @@ def __init__(self, idnt_data):

# Store initial parameters for reset (see `self.reset`)
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
self._init_kwargs = {}
args.remove("self")
for arg in args:
self._init_kwargs[arg] = copy.deepcopy(values[arg])

def __contains__(self, key):
return self.data.__contains__(key)
iargs, _, _, values = inspect.getargvalues(frame)
self._init_kwargs = {
"data": copy.deepcopy(data),
"metadata": copy.deepcopy(metadata)
}

def __getitem__(self, key):
return self.data.__getitem__(key)

def __setitem__(self, key, value):
return self.data.__setitem__(key, value)

def __repr__(self):
return "Indentation {: 6d} in '{}'".format(
self.enum,
self.path
)
@property
def data(self):
warnings.warn("Please use __getitem__ instead!")
return self

@property
def fit_properties(self):
Expand Down Expand Up @@ -99,7 +82,7 @@ def apply_preprocessing(self, preprocessing=None):
# Check availability of axes
for ax in ["x_axis", "y_axis"]:
# make sure the fitting axes are defined
if ax in fp and not fp[ax] in self.data:
if ax in fp and not fp[ax] in self:
fp.pop(ax)
# Set new fit properties
self.fit_properties = fp
Expand Down Expand Up @@ -302,7 +285,7 @@ def estimate_contact_point_index(self):
fail.
"""
# get data
y0 = np.array(self.data["force"], copy=True)
y0 = np.array(self["force"], copy=True)
# Only use the (initial) approach part of the curve.
idmax = np.argmax(y0)
y = y0[:idmax]
Expand All @@ -318,10 +301,6 @@ def estimate_contact_point_index(self):

return idp

def export(self, path, fmt="tab"):
"""Saves the current data as tab separated values"""
self.data.export(path, fmt=fmt)

def fit_model(self, **kwargs):
"""Fit the approach-retract data to a model function
Expand Down Expand Up @@ -480,6 +459,10 @@ def rate_quality(self, regressor="Extra Trees", training_set="zef18",
training_set: str
A label for a training set shipped with nanite or a
path to a training set.
names: list of str
Only use these features for rating
lda: bool
Perform linear discriminant analysis
Returns
-------
Expand All @@ -492,7 +475,7 @@ def rate_quality(self, regressor="Extra Trees", training_set="zef18",
The rating is cached based on the fitting hash
(see `IndentationFitter._hash`).
"""
if (self.fit_properties and "hash" in self.fit_properties):
if self.fit_properties and "hash" in self.fit_properties:
curhash = self.fit_properties["hash"]
else:
curhash = "none"
Expand Down
4 changes: 2 additions & 2 deletions nanite/model/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ def get_anc_parms(idnt, model_key):
and "params_fitted" in idnt.fit_properties
and "contact_point" in idnt.fit_properties["params_fitted"]):
cp = idnt.fit_properties["params_fitted"]["contact_point"].value
idmax = idnt.data.appr["fit"].argmax()
mi = idnt.data.appr["tip position"][idmax]
idmax = idnt.appr["fit"].argmax()
mi = idnt.appr["tip position"][idmax]
mival = (cp-mi)
else:
mival = np.nan
Expand Down
28 changes: 14 additions & 14 deletions nanite/preproc.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def compute_tip_position(apret):
k = apret.metadata["spring constant"]
force = apret["force"]
zcant = apret["height (measured)"]
apret.data["tip position"] = zcant + force/k
apret["tip position"] = zcant + force/k
else:
missing = []
if not has_hm:
Expand All @@ -88,9 +88,9 @@ def correct_force_offset(apret):
"""
idp = apret.estimate_contact_point_index()
if idp:
apret.data["force"] -= np.average(apret.data["force"][:idp])
apret["force"] -= np.average(apret["force"][:idp])
else:
apret.data["force"] -= apret.data["force"][0]
apret["force"] -= apret["force"][0]

@staticmethod
def correct_tip_offset(apret):
Expand All @@ -100,7 +100,7 @@ def correct_tip_offset(apret):
contact point.
"""
cpid = apret.estimate_contact_point_index()
apret.data["tip position"] -= apret.data["tip position"][cpid]
apret["tip position"] -= apret["tip position"][cpid]

@staticmethod
def correct_split_approach_retract(apret):
Expand All @@ -115,8 +115,8 @@ def correct_split_approach_retract(apret):
To repair this time lag, we append parts of the retract curve to the
approach curve, such that the curves are split at the minimum height.
"""
x = np.array(apret.data["tip position"], copy=True)
y = np.array(apret.data["force"], copy=True)
x = np.array(apret["tip position"], copy=True)
y = np.array(apret["force"], copy=True)

idp = apret.estimate_contact_point_index()
if idp:
Expand All @@ -133,7 +133,7 @@ def correct_split_approach_retract(apret):

idmin = np.argmax(x**2+y**2)

segment = np.zeros(len(apret.data), dtype=bool)
segment = np.zeros(len(apret), dtype=bool)
segment[idmin:] = True
apret["segment"] = segment
else:
Expand All @@ -148,20 +148,20 @@ def smooth_height(apret):
For the columns "height (measured)" and "tip position",
and for the approach and retract data separately, this
method adds the columns "height (measured, smoothed)" and
"tip position (smoothed)" to `self.data`.
"tip position (smoothed)" to `apret`.
"""
orig = ["height (measured)",
"tip position"]
dest = ["height (measured, smoothed)",
"tip position (smoothed)"]
for o, d in zip(orig, dest):
if o not in apret.data.columns:
if o not in apret.columns:
continue
# Get approach and retract data
app_idx = ~apret.data["segment"]
app = np.array(apret.data[o][app_idx])
ret_idx = apret.data["segment"]
ret = np.array(apret.data[o][ret_idx])
app_idx = ~apret["segment"]
app = np.array(apret[o][app_idx])
ret_idx = apret["segment"]
ret = np.array(apret[o][ret_idx])
# Apply smoothing
sm_app = smooth_axis_monotone(app)
sm_ret = smooth_axis_monotone(ret)
Expand All @@ -172,7 +172,7 @@ def smooth_height(apret):
assert(np.all(end-begin > 0)), "Found retract before approach!"

# If everything is ok, we can add the new columns
apret.data[d] = np.concatenate((sm_app, sm_ret))
apret[d] = np.concatenate((sm_app, sm_ret))


#: Available preprocessors
Expand Down

0 comments on commit c96eb64

Please sign in to comment.