Improve transforms, implement drift correction transforms, require from_ methods for instantiation.

- Replace transform, drift and beadfile arguments for Imread by with_transform method.
- Bring Imread.transpose in line with numpy.transpose.
- Fix seqread.lazy_property.
Wim Pomp committed Nov 2, 2023
1 parent 2e56f45 commit d13b702
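
For orientation, a minimal sketch of how the reworked Imread API described above might be used. It is based only on the commit message: the file name is a placeholder, and the exact with_transform signature and transpose axis order are assumptions, since the __init__.py diff is not rendered below.

from ndbioimage import Imread

im = Imread('cells.czi')            # placeholder file; transform/drift/beadfile are no longer constructor arguments
im_corrected = im.with_transform()  # request a transformed/drift-corrected view explicitly (assumed zero-argument call)
im_t = im.transpose(4, 3, 2, 1, 0)  # transpose now follows numpy.transpose semantics: pass an axis order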
Showing 10 changed files with 445 additions and 505 deletions.
532 changes: 150 additions & 382 deletions ndbioimage/__init__.py

Large diffs are not rendered by default.

24 changes: 12 additions & 12 deletions ndbioimage/jvm.py
@@ -19,35 +19,35 @@ def __new__(cls, *args):
def __init__(self, jars=None):
if not self.vm_started and not self.vm_killed:
try:
- jarpath = Path(__file__).parent / 'jars'
+ jar_path = Path(__file__).parent / 'jars'
if jars is None:
jars = {}
for jar, src in jars.items():
- if not (jarpath / jar).exists():
-     JVM.download(src, jarpath / jar)
- classpath = [str(jarpath / jar) for jar in jars.keys()]
+ if not (jar_path / jar).exists():
+     JVM.download(src, jar_path / jar)
+ classpath = [str(jar_path / jar) for jar in jars.keys()]

import jpype
jpype.startJVM(classpath=classpath)
- except Exception:
+ except Exception:  # noqa
self.vm_started = False
else:
self.vm_started = True
try:
import jpype.imports
- from loci.common import DebugTools
- from loci.formats import ImageReader
- from loci.formats import ChannelSeparator
- from loci.formats import FormatTools
- from loci.formats import MetadataTools
+ from loci.common import DebugTools  # noqa
+ from loci.formats import ImageReader  # noqa
+ from loci.formats import ChannelSeparator  # noqa
+ from loci.formats import FormatTools  # noqa
+ from loci.formats import MetadataTools  # noqa

DebugTools.setRootLevel("ERROR")

self.image_reader = ImageReader
self.channel_separator = ChannelSeparator
self.format_tools = FormatTools
self.metadata_tools = MetadataTools
- except Exception:
+ except Exception:  # noqa
pass

if self.vm_killed:
@@ -64,7 +64,7 @@ def kill_vm(cls):
self = cls._instance
if self is not None and self.vm_started and not self.vm_killed:
import jpype
- jpype.shutdownJVM()
+ jpype.shutdownJVM()  # noqa
self.vm_started = False
self.vm_killed = True

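The jvm.py hunks are a rename (jarpath → jar_path) plus # noqa markers, but they also show the startup pattern the class wraps: collect the Bio-Formats jars next to the module, build a classpath, start one JVM through JPype, then import the Java classes. A stripped-down sketch of that pattern; the jar location and names are placeholders:

from pathlib import Path
import jpype
import jpype.imports  # enables Java package imports once the JVM is running

jar_dir = Path(__file__).parent / 'jars'              # same layout jvm.py uses
classpath = [str(p) for p in jar_dir.glob('*.jar')]   # e.g. a downloaded bioformats_package.jar
jpype.startJVM(classpath=classpath)                   # may only happen once per process

from loci.common import DebugTools  # noqa: E402, needs the running JVM
DebugTools.setRootLevel("ERROR")    # silence Bio-Formats logging, as jvm.py does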
4 changes: 2 additions & 2 deletions ndbioimage/readers/bfread.py
@@ -153,7 +153,7 @@ def run(self):
self.queue_out.put(image[..., c])
else:
self.queue_out.put(image)
- except queues.Empty:
+ except queues.Empty:  # noqa
continue
except (Exception,):
print_exc()
@@ -171,7 +171,7 @@ def can_open(path):
except (Exception,):
return False
finally:
- jvm.kill_vm()
+ jvm.kill_vm()  # noqa


class Reader(AbstractReader, ABC):
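Both bfread.py hunks only append # noqa; the surrounding code is a worker loop that polls a queue and treats an empty queue as "try again" rather than an error. A generic sketch of that polling pattern, independent of the Bio-Formats internals:

import queue
import threading

def worker(queue_in: queue.Queue, queue_out: queue.Queue, stop: threading.Event) -> None:
    while not stop.is_set():
        try:
            item = queue_in.get(timeout=0.1)   # short timeout keeps the loop responsive to `stop`
        except queue.Empty:                    # nothing queued yet, poll again
            continue
        queue_out.put(item * 2)                # stand-in for decoding a frame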
24 changes: 12 additions & 12 deletions ndbioimage/readers/cziread.py
@@ -31,7 +31,7 @@ def open(self):
filedict[c, z, t].append(directory_entry)
else:
filedict[c, z, t] = [directory_entry]
- self.filedict = filedict
+ self.filedict = filedict  # noqa

def close(self):
self.reader.close()
@@ -116,8 +116,8 @@ def def_list(item):
y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
size_x = x_max - x_min
size_y = y_max - y_min
- size_c, size_z, size_t = [self.reader.shape[self.reader.axes.index(directory_entry)]
-                           for directory_entry in 'CZT']
+ size_c, size_z, size_t = (self.reader.shape[self.reader.axes.index(directory_entry)]
+                           for directory_entry in 'CZT')

image = information.find("Image")
pixel_type = text(image.find("PixelType"), "Gray16")
@@ -277,23 +277,23 @@ def def_list(item):
text(light_source.find("LightSourceType").find("Laser").find("Wavelength")))))

multi_track_setup = acquisition_block.find("MultiTrackSetup")
- for idx, tube_lens in enumerate(set(text(track_setup.find("TubeLensPosition"))
-                                     for track_setup in multi_track_setup)):
+ for idx, tube_lens in enumerate({text(track_setup.find("TubeLensPosition"))
+                                  for track_setup in multi_track_setup}):
ome.instruments[0].objectives.append(
model.Objective(id=f"Objective:Tubelens:{idx}", model=tube_lens,
nominal_magnification=float(
re.findall(r'\d+[,.]\d*', tube_lens)[0].replace(',', '.'))
))

- for idx, filter_ in enumerate(set(text(beam_splitter.find("Filter"))
-                                   for track_setup in multi_track_setup
-                                   for beam_splitter in track_setup.find("BeamSplitters"))):
+ for idx, filter_ in enumerate({text(beam_splitter.find("Filter"))
+                                for track_setup in multi_track_setup
+                                for beam_splitter in track_setup.find("BeamSplitters")}):
ome.instruments[0].filter_sets.append(
model.FilterSet(id=f"FilterSet:{idx}", model=filter_)
)

- for idx, collimator in enumerate(set(text(track_setup.find("FWFOVPosition"))
-                                      for track_setup in multi_track_setup)):
+ for idx, collimator in enumerate({text(track_setup.find("FWFOVPosition"))
+                                   for track_setup in multi_track_setup}):
ome.instruments[0].filters.append(model.Filter(id=f"Filter:Collimator:{idx}", model=collimator))

x_min = min([f.start[f.axes.index('X')] for f in self.filedict[0, 0, 0]])
@@ -302,8 +302,8 @@ def def_list(item):
y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
size_x = x_max - x_min
size_y = y_max - y_min
- size_c, size_z, size_t = [self.reader.shape[self.reader.axes.index(directory_entry)]
-                           for directory_entry in 'CZT']
+ size_c, size_z, size_t = (self.reader.shape[self.reader.axes.index(directory_entry)]
+                           for directory_entry in 'CZT')

image = information.find("Image")
pixel_type = text(image.find("PixelType"), "Gray16")
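The cziread.py changes are stylistic: set(...) wrapped around a generator becomes a set comprehension, and a list comprehension that is immediately unpacked becomes a generator expression. A tiny self-contained illustration (values are made up) of why the results are identical:

tube_lens_positions = ['Lens 1.0x', 'Lens 1.0x', 'Lens 2.5x']
assert set(p.upper() for p in tube_lens_positions) == {p.upper() for p in tube_lens_positions}

shape = {'C': 2, 'Z': 5, 'T': 10}
size_c, size_z, size_t = (shape[ax] for ax in 'CZT')   # unpacking consumes the generator, no temporary list
assert (size_c, size_z, size_t) == (2, 5, 10)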
8 changes: 4 additions & 4 deletions ndbioimage/readers/fijiread.py
@@ -32,10 +32,10 @@ def open(self):
self.reader = TiffFile(self.path)
assert self.reader.pages[0].compression == 1, "Can only read uncompressed tiff files."
assert self.reader.pages[0].samplesperpixel == 1, "Can only read 1 sample per pixel."
- self.offset = self.reader.pages[0].dataoffsets[0]
- self.count = self.reader.pages[0].databytecounts[0]
- self.bytes_per_sample = self.reader.pages[0].bitspersample // 8
- self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1]
+ self.offset = self.reader.pages[0].dataoffsets[0]  # noqa
+ self.count = self.reader.pages[0].databytecounts[0]  # noqa
+ self.bytes_per_sample = self.reader.pages[0].bitspersample // 8  # noqa
+ self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1]  # noqa

def close(self):
self.reader.close()
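The fijiread.py hunk only appends # noqa, but the lines it touches build a struct format string for unpacking raw, uncompressed samples: byte order, then one format character per sample, chosen from 'BHILQ' by the sample width. A short worked example with assumed values:

import struct

byteorder = '<'                         # little-endian, assumed
bytes_per_sample = 2                    # 16-bit samples
count = 8                               # bytes in the strip -> 4 samples
fmt = byteorder + count // bytes_per_sample * 'BHILQ'[bytes_per_sample - 1]
assert fmt == '<HHHH'

raw = bytes([1, 0, 2, 0, 3, 0, 4, 0])              # stand-in for the bytes read at the page offset
assert struct.unpack(fmt, raw) == (1, 2, 3, 4)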
4 changes: 2 additions & 2 deletions ndbioimage/readers/ndread.py
@@ -15,7 +15,7 @@ def _can_open(path):

@cached_property
def ome(self):
- def shape(size_x=1, size_y=1, size_c=1, size_z=1, size_t=1):
+ def shape(size_x=1, size_y=1, size_c=1, size_z=1, size_t=1):  # noqa
return size_x, size_y, size_c, size_z, size_t
size_x, size_y, size_c, size_z, size_t = shape(*self.array.shape)
try:
@@ -42,7 +42,7 @@ def open(self):
if isinstance(self.path, np.ndarray):
self.array = np.array(self.path)
while self.array.ndim < 5:
- self.array = np.expand_dims(self.array, -1)
+ self.array = np.expand_dims(self.array, -1)  # noqa
self.path = 'numpy array'

def __frame__(self, c, z, t):
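For context on the ndread.py lines above: a bare numpy array passed in place of a path is padded with trailing singleton axes until it has the five axes the reader expects. A minimal illustration of that padding:

import numpy as np

array = np.zeros((512, 256))               # a plain 2D image
while array.ndim < 5:
    array = np.expand_dims(array, -1)      # append singleton axes at the end
assert array.shape == (512, 256, 1, 1, 1)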
12 changes: 8 additions & 4 deletions ndbioimage/readers/seqread.py
@@ -4,7 +4,7 @@
from pathlib import Path
from functools import cached_property
from ome_types import model
- from ome_types.units import _quantity_property
+ from ome_types.units import _quantity_property  # noqa
from itertools import product
from datetime import datetime
from abc import ABC
@@ -15,7 +15,10 @@ def lazy_property(function, field, *arg_fields):
def lazy(self):
if self.__dict__.get(field) is None:
self.__dict__[field] = function(*[getattr(self, arg_field) for arg_field in arg_fields])
- self.model_fields_set.add(field)
+ try:
+     self.model_fields_set.add(field)
+ except Exception:  # noqa
+     pass
return self.__dict__[field]
return property(lazy)

@@ -24,6 +24,7 @@ class Plane(model.Plane):
""" Lazily retrieve delta_t from metadata """
def __init__(self, t0, file, **kwargs):
super().__init__(**kwargs)
+ # setting fields here because they would be removed by ome_types/pydantic after class definition
setattr(self.__class__, 'delta_t', lazy_property(self.get_delta_t, 'delta_t', 't0', 'file'))
setattr(self.__class__, 'delta_t_quantity', _quantity_property('delta_t'))
self.__dict__['t0'] = t0
@@ -79,7 +83,7 @@ def ome(self):
else:
pixel_type = "uint16" # assume

- size_c, size_z, size_t = [max(i) + 1 for i in zip(*self.filedict.keys())]
+ size_c, size_z, size_t = (max(i) + 1 for i in zip(*self.filedict.keys()))
t0 = datetime.strptime(metadata["Info"]["Time"], "%Y-%m-%d %H:%M:%S %z")
ome.images.append(
model.Image(
@@ -123,7 +127,7 @@ def open(self):
pattern_c = re.compile(r"img_\d{3,}_(.*)_\d{3,}$")
pattern_z = re.compile(r"(\d{3,})$")
pattern_t = re.compile(r"img_(\d{3,})")
- self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]),
+ self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]),  # noqa
int(pattern_z.findall(file.stem)[0]),
int(pattern_t.findall(file.stem)[0])): file for file in filelist}

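The headline fix in seqread.py wraps the model_fields_set bookkeeping in try/except so lazy_property keeps working when the ome_types/pydantic model does not expose that attribute. The caching behaviour itself is easiest to see on a plain class (names below are illustrative, and the model_fields_set step is omitted because a plain class has none):

def lazy_property(function, field, *arg_fields):
    def lazy(self):
        if self.__dict__.get(field) is None:   # compute on first access only
            self.__dict__[field] = function(*[getattr(self, arg_field) for arg_field in arg_fields])
        return self.__dict__[field]
    return property(lazy)

calls = []

def read_delta_t(t0):          # stand-in for parsing a timestamp out of the metadata file
    calls.append(t0)
    return 42.0 - t0

class Frame:
    delta_t = lazy_property(read_delta_t, 'delta_t', 't0')

    def __init__(self, t0):
        self.t0 = t0

f = Frame(2.0)
assert f.delta_t == 40.0
assert f.delta_t == 40.0 and calls == [2.0]    # computed once, then served from __dict__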
6 changes: 3 additions & 3 deletions ndbioimage/readers/tifread.py
@@ -17,7 +17,7 @@ class Reader(AbstractReader, ABC):
def _can_open(path):
if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'):
with tifffile.TiffFile(path) as tif:
- return tif.is_imagej and tif.pages[-1]._nextifd() == 0
+ return tif.is_imagej and tif.pages[-1]._nextifd() == 0  # noqa
else:
return False

@@ -27,12 +27,12 @@ def ome(self):
for key, value in self.reader.imagej_metadata.items()}

page = self.reader.pages[0]
- self.p_ndim = page.ndim
+ self.p_ndim = page.ndim  # noqa
size_x = page.imagelength
size_y = page.imagewidth
if self.p_ndim == 3:
size_c = page.samplesperpixel
- self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0]
+ self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0]  # noqa
size_t = metadata.get('frames', 1) # // C
else:
size_c = metadata.get('channels', 1)
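The tifread.py hunks again only add # noqa, but the p_transpose line deserves a note: it maps the page's actual axis string onto the wanted 'SYX' order, dropping axes the page does not have (str.find returns -1 for those). A short illustration with an assumed axis string:

import numpy as np

page_axes = 'YXS'    # assumed tifffile axis order for an RGB page: rows, columns, samples
p_transpose = [i for i in [page_axes.find(j) for j in 'SYX'] if i >= 0]
assert p_transpose == [2, 0, 1]

frame = np.zeros((480, 640, 3))                               # Y, X, S
assert frame.transpose(p_transpose).shape == (3, 480, 640)    # S, Y, X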
