diff --git a/_src/source/augmentations.rst b/_src/source/augmentations.rst index e37c74a9d..6d007ab44 100644 --- a/_src/source/augmentations.rst +++ b/_src/source/augmentations.rst @@ -69,6 +69,13 @@ Pad :members: :exclude-members: get +PadToMultiplesOf +^^^^^^^^^^^^^^^^ + +.. autoclass:: deeptrack.augmentations.PadToMultiplesOf + :members: + :exclude-members: get + PreLoad ^^^^^^^ diff --git a/_src/source/features.rst b/_src/source/features.rst index d989b39f1..bc4a0de55 100644 --- a/_src/source/features.rst +++ b/_src/source/features.rst @@ -6,6 +6,34 @@ features Module classes <<<<<<<<<<<<<< +AsType +^^^^^^ + +.. autoclass:: deeptrack.features.AsType + :members: + :exclude-members: get + +Bind +^^^^ + +.. autoclass:: deeptrack.features.Bind + :members: + :exclude-members: get + +BindResolve +^^^^^^^^^^^ + +.. autoclass:: deeptrack.features.BindResolve + :members: + :exclude-members: get + +BindUpdate +^^^^^^^^^^ + +.. autoclass:: deeptrack.features.BindUpdate + :members: + :exclude-members: get + Branch ^^^^^^ diff --git a/deeptrack/aberrations.py b/deeptrack/aberrations.py index 7a9ef09c8..f170a53f5 100644 --- a/deeptrack/aberrations.py +++ b/deeptrack/aberrations.py @@ -33,8 +33,8 @@ """ import numpy as np -from deeptrack.features import Feature -from deeptrack.utils import as_list +from .features import Feature +from .utils import as_list class Aberration(Feature): diff --git a/deeptrack/augmentations.py b/deeptrack/augmentations.py index ad0124571..e8584403e 100644 --- a/deeptrack/augmentations.py +++ b/deeptrack/augmentations.py @@ -19,12 +19,16 @@ Flips images diagonally. """ -from deeptrack.features import Feature -from deeptrack.image import Image +from .features import Feature +from .image import Image +from . import utils + import numpy as np -from typing import Callable +import scipy.ndimage as ndimage from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter + +from typing import Callable import warnings @@ -72,7 +76,7 @@ def __init__( **kwargs ): - if load_size is not 1: + if load_size != 1: warnings.warn( "Using an augmentation with a load size other than one is no longer supported", DeprecationWarning, @@ -115,6 +119,7 @@ def _process_and_get(self, *args, update_properties=None, **kwargs): not hasattr(self, "cache") or kwargs["update_tally"] - self.last_update >= kwargs["updates_per_reload"] ): + if isinstance(self.feature, list): self.cache = [feature.resolve() for feature in self.feature] else: @@ -151,11 +156,15 @@ def _process_and_get(self, *args, update_properties=None, **kwargs): ] ) else: - new_list_of_lists.append( - Image(self.get(Image(image_list), **kwargs)).merge_properties_from( - image_list - ) - ) + # DANGEROUS + # if not isinstance(image_list, Image): + image_list = Image(image_list) + + output = self.get(image_list, **kwargs) + + if not isinstance(output, Image): + output = Image(output) + new_list_of_lists.append(output.merge_properties_from(image_list)) if update_properties: if not isinstance(new_list_of_lists, list): @@ -252,7 +261,10 @@ def update_properties(self, image, number_of_updates, **kwargs): for prop in image.properties: if "position" in prop: position = prop["position"] - new_position = (image.shape[0] - position[0] - 1, *position[1:]) + new_position = ( + image.shape[0] - position[0] - 1, + *position[1:], + ) prop["position"] = new_position @@ -279,13 +291,6 @@ def update_properties(self, image, number_of_updates, **kwargs): prop["position"] = new_position -from deeptrack.utils import 
get_kwarg_names -import warnings - -import scipy.ndimage as ndimage -import deeptrack.utils as utils - - class Affine(Augmentation): """ Augmenter to apply affine transformations to images. @@ -386,7 +391,9 @@ def get(self, image, scale, translate, rotate, shear, **kwargs): assert ( image.ndim == 2 or image.ndim == 3 - ), "Affine only supports 2-dimensional or 3-dimension inputs." + ), "Affine only supports 2-dimensional or 3-dimension inputs, got {0}".format( + image.ndim + ) dx, dy = translate fx, fy = scale @@ -551,7 +558,10 @@ def get(self, image, sigma, alpha, ignore_last_dim, **kwargs): for dim in shape: deltas.append( gaussian_filter( - (np.random.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0 + (np.random.rand(*shape) * 2 - 1), + sigma, + mode="constant", + cval=0, ) * alpha ) @@ -619,6 +629,8 @@ def get(self, image, corner, crop, crop_mode, **kwargs): if isinstance(crop, int): crop = (crop,) * image.ndim + crop = [c if c is not None else image.shape[i] for i, c in enumerate(crop)] + # Get amount to crop from image if crop_mode == "retain": crop_amount = np.array(image.shape) - np.array(crop) @@ -631,12 +643,9 @@ def get(self, image, corner, crop, crop_mode, **kwargs): crop_amount = np.amax((np.array(crop_amount), [0] * image.ndim), axis=0) crop_amount = np.amin((np.array(image.shape) - 1, crop_amount), axis=0) # Get corner of crop - if corner == "random": + if isinstance(corner, str) and corner == "random": # Ensure seed is consistent - slice_start = np.random.randint( - [0] * crop_amount.size, - crop_amount + 1, - ) + slice_start = [np.random.randint(m + 1) for m in crop_amount] elif callable(corner): slice_start = corner(image) else: @@ -654,6 +663,7 @@ def get(self, image, corner, crop, crop_mode, **kwargs): for slice_start_i, slice_end_i in zip(slice_start, slice_end) ] ) + cropped_image = image[slices] # Update positions @@ -729,15 +739,74 @@ def __init__(self, px=(0, 0, 0, 0), mode="constant", cval=0, **kwargs): def get(self, image, px, **kwargs): padding = [] - if isinstance(px, int): + if callable(px): + px = px(image) + elif isinstance(px, int): padding = [(px, px)] * image.ndom + for idx in range(0, len(px), 2): padding.append((px[idx], px[idx + 1])) while len(padding) < image.ndim: padding.append((0, 0)) - return utils.safe_call(np.pad, positional_args=(image, padding), **kwargs) + return ( + utils.safe_call(np.pad, positional_args=(image, padding), **kwargs), + padding, + ) + + def _process_and_get(self, images, **kwargs): + results = [self.get(image, **kwargs) for image in images] + for idx, result in enumerate(results): + if isinstance(result, tuple): + shape = result[0].shape + padding = result[1] + de_pad = tuple( + slice(p[0], shape[dim] - p[1]) for dim, p in enumerate(padding) + ) + results[idx] = ( + Image(result[0]).merge_properties_from(images[idx]), + {"undo_padding": de_pad}, + ) + else: + Image(results[idx]).merge_properties_from(images[idx]) + return results + + +class PadToMultiplesOf(Pad): + """Pad images until their height/width is a multiple of a value. + + Parameters + ---------- + multiple : int or tuple of (int or None) + Images will be padded until their width is a multiple of + this value. If a tuple, it is assumed to be a multiple per axis. + A value of None or -1 indicates to skip that axis. 
+ + """ + + def __init__(self, multiple=1, **kwargs): + def amount_to_pad(image): + shape = image.shape + multiple = self.multiple.current_value + + if not isinstance(multiple, (list, tuple, np.ndarray)): + multiple = (multiple,) * image.ndim + new_shape = [0] * (image.ndim * 2) + idx = 0 + for dim, mul in zip(shape, multiple): + if mul is not None and mul is not -1: + to_add = -dim % mul + to_add_first = to_add // 2 + to_add_after = to_add - to_add_first + new_shape[idx * 2] = to_add_first + new_shape[idx * 2 + 1] = to_add_after + + idx += 1 + + return new_shape + + super().__init__(multiple=multiple, px=lambda: amount_to_pad, **kwargs) -# TODO: add resizing by rescaling \ No newline at end of file +# TODO: add resizing by rescaling diff --git a/deeptrack/features.py b/deeptrack/features.py index 03e6df5ec..379116149 100644 --- a/deeptrack/features.py +++ b/deeptrack/features.py @@ -9,28 +9,26 @@ StructuralFeature Abstract extension of feature for interactions between features. Branch - Implementation of `StructuralFeature` that resolves two features + Implementation of `StructuralFeature` that resolves two features sequentially. Probability - Implementation of `StructuralFeature` that randomly resolves a feature + Implementation of `StructuralFeature` that randomly resolves a feature with a certain probability. Duplicate - Implementation of `StructuralFeature` that sequentially resolves an + Implementation of `StructuralFeature` that sequentially resolves an integer number of deep-copies of a feature. """ import copy +import enum -from abc import ABC, abstractmethod from typing import List import numpy as np -import time import threading -from deeptrack.image import Image -from deeptrack.properties import Property, PropertyDict -from deeptrack.utils import isiterable, hasmethod, get_kwarg_names, kwarg_has_default +from .image import Image +from .properties import Property, PropertyDict MERGE_STRATEGY_OVERRIDE = 0 @@ -200,8 +198,12 @@ def resolve(self, image_list: Image or List[Image] = None, **global_kwargs): property_verbosity = global_kwargs.get("property_memorability", 1) feature_input["name"] = type(self).__name__ if self.__property_memorability__ <= property_verbosity: - for image in new_list: - image.append(feature_input) + for index, image in enumerate(new_list): + if isinstance(image, tuple): + image[0].append({**feature_input, **image[1]}) + new_list[index] = image[0] + else: + image.append(feature_input) # Merge input and new_list if self.__list_merge_strategy__ == MERGE_STRATEGY_OVERRIDE: @@ -305,11 +307,11 @@ def plot( display(HTML(anim.to_jshtml())) return anim - except NameError as e: + except NameError: # Not in an notebook plt.show() - except RuntimeError as e: + except RuntimeError: # In notebook, but animation failed import ipywidgets as widgets @@ -333,18 +335,27 @@ def _process_and_get(self, image_list, **feature_input) -> List[Image]: if self.__distributed__: # Call get on each image in list, and merge properties from corresponding image - return [ - Image(self.get(image, **feature_input)).merge_properties_from(image) - for image in image_list - ] + + results = [] + + for image in image_list: + output = self.get(image, **feature_input) + if not isinstance(output, Image): + output = Image(output) + results.append(output) + + return results + else: # Call get on entire list. 
new_list = self.get(image_list, **feature_input) if not isinstance(new_list, list): - new_list = [Image(new_list)] + new_list = [new_list] - new_list = [Image(image) for image in new_list] + for idx, image in enumerate(new_list): + if not isinstance(image, Image): + new_list[idx] = Image(image) return new_list def _format_input(self, image_list, **kwargs) -> List[Image]: @@ -567,6 +578,61 @@ def get(self, image_list, features, **kwargs): return [feature.resolve(image_list, **kwargs) for feature in features] +class Bind(StructuralFeature): + """Binds a feature with property arguments. + + When the feature is resolved, the kwarg arguments are passed + to the child feature. + + Parameters + ---------- + feature : Feature + The child feature + **kwargs + Properties to send to child + + """ + + __distributed__ = False + + def __init__(self, feature: Feature, **kwargs): + super().__init__(feature=feature, **kwargs) + + def get(self, image, feature, **kwargs): + return feature.resolve(image, **kwargs) + + +BindResolve = Bind + + +class BindUpdate(StructuralFeature): + """Binds a feature with certain arguments. + + When the feature is updated, the child feature + + Parameters + ---------- + feature : Feature + The child feature + **kwargs + Properties to send to child + + """ + + __distributed__ = False + + def __init__(self, feature: Feature, **kwargs): + self.feature = feature + super().__init__(**kwargs) + + def _update(self, **kwargs): + super()._update(**kwargs) + self.feature._update(**{**kwargs, **self.properties}) + + def get(self, image, **kwargs): + return self.feature.resolve(image, **kwargs) + + class ConditionalSetProperty(StructuralFeature): """Conditionally overrides the properties of child features @@ -818,30 +884,32 @@ def get( get_one_random, **kwargs ): + if not isinstance(path, List): + path = [path] if load_options is None: load_options = {} try: - image = np.load(path, **load_options) + image = [np.load(file, **load_options) for file in path] except (IOError, ValueError): try: from skimage import io - image = io.imread(path) + image = [io.imread(file) for file in path] except (IOError, ImportError, AttributeError): try: import PIL.Image - omage = np.array(PIL.Image.open(path, **load_options)) + image = [PIL.Image.open(file, **load_options) for file in path] except (IOError, ImportError): import cv2 - image = np.array(cv2.imread(path, **load_options)) + image = [cv2.imread(file, **load_options) for file in path] if not image: raise IOError( "No filereader available for file {0}".format(path) ) - image = np.squeeze(image) + image = np.stack(image, axis=-1) if to_grayscale: try: @@ -974,7 +1042,8 @@ def _process_and_get(self, images, **kwargs): p0 = p0.astype(np.int) output_slice = output[ - p0[0] : p0[0] + labelarg.shape[0], p0[1] : p0[1] + labelarg.shape[1] + p0[0] : p0[0] + labelarg.shape[0], + p0[1] : p0[1] + labelarg.shape[1], ] for label_index in range(kwargs["number_of_masks"]): @@ -1024,7 +1093,8 @@ def _process_and_get(self, images, **kwargs): p0[1] : p0[1] + labelarg.shape[1], label_index, ] = merge( - output_slice[..., label_index], labelarg[..., label_index] + output_slice[..., label_index], + labelarg[..., label_index], ) output = Image(output) for label in list_of_labels: @@ -1038,7 +1108,7 @@ def _get_position(image, mode="corner", return_z=False): if mode == "corner": shift = (np.array(image.shape) - 1) / 2 else: - shift = np.zeros((num_outputs)) + shift = np.zeros((3 if return_z else 2)) positions = image.get_property("position", False, []) @@ -1054,7 +1124,11 
@@ def _get_position(image, mode="corner", return_z=False): if return_z: outp = ( np.array( - [position[0], position[1], image.get_property("z", default=0)] + [ + position[0], + position[1], + image.get_property("z", default=0), + ] ) - shift ) @@ -1063,3 +1137,23 @@ def _get_position(image, mode="corner", return_z=False): positions_out.append(position - shift[0:2]) return positions_out + + +class AsType(Feature): + """Converts the data type of images + + Accepts same types as numpy arrays. Common types include + + `float64, int32, uint16, int16, uint8, int8` + + Parameters + ---------- + dtype : str + dtype string. Same as numpy dtype. + """ + + def __init__(self, dtype="float64", **kwargs): + super().__init__(dtype=dtype, **kwargs) + + def get(self, image, dtype, **kwargs): + return image.astype(dtype) \ No newline at end of file diff --git a/deeptrack/generators.py b/deeptrack/generators.py index 8c8d4412d..6b569673f 100644 --- a/deeptrack/generators.py +++ b/deeptrack/generators.py @@ -12,8 +12,8 @@ from typing import List import tensorflow.keras as keras -from deeptrack.features import Feature -from deeptrack.image import Image +from .features import Feature +from .image import Image import threading import random import time @@ -107,10 +107,12 @@ def generate( if sub_batch.ndim > ndim: dims_to_remove = sub_batch.ndim - ndim sub_batch = np.reshape( - sub_batch, (-1, *sub_batch.shape[dims_to_remove + 1 :]) + sub_batch, + (-1, *sub_batch.shape[dims_to_remove + 1 :]), ) sub_labels = np.reshape( - sub_labels, (-1, *sub_labels.shape[dims_to_remove + 1 :]) + sub_labels, + (-1, *sub_labels.shape[dims_to_remove + 1 :]), ) elif sub_batch.ndim < ndim: @@ -302,11 +304,11 @@ def __getitem__(self, idx): return outputs def __len__(self): - l = int((len(self.current_data) // self._batch_size)) + steps = int((len(self.current_data) // self._batch_size)) assert ( - l > 0 + steps > 0 ), "There needs to be at least batch_size number of datapoints. Try increasing min_data_size." - return l + return steps def _continuous_get_training_data(self): index = 0 @@ -317,11 +319,11 @@ def _continuous_get_training_data(self): new_image = self._get(self.feature, self.feature_kwargs) - if self.label_function: - new_label = Image(self.label_function(new_image)) + # if self.label_function: + new_label = self.label_function(new_image) if self.batch_function: - new_image = Image(self.batch_function(new_image)) + new_image = self.batch_function(new_image) if new_image.ndim < self.ndim: new_image = [new_image] @@ -329,7 +331,10 @@ def _continuous_get_training_data(self): for new_image_i, new_label_i in zip(new_image, new_label): if len(self.data) >= self.max_data_size: - self.data[index % self.max_data_size] = (new_image_i, new_label_i) + self.data[index % self.max_data_size] = ( + new_image_i, + new_label_i, + ) else: self.data.append((new_image_i, new_label_i)) diff --git a/deeptrack/image.py b/deeptrack/image.py index be20fba44..72984ca3a 100644 --- a/deeptrack/image.py +++ b/deeptrack/image.py @@ -176,7 +176,7 @@ def __array_wrap__(self, image_after_function, context=None): for arg in input_args: - if not arg is self and isinstance(arg, Image): + if arg is not self and isinstance(arg, Image): self.merge_properties_from(arg) return image_with_restored_properties diff --git a/deeptrack/layers.py b/deeptrack/layers.py index 5e68ec756..fe90f98c5 100644 --- a/deeptrack/layers.py +++ b/deeptrack/layers.py @@ -1,17 +1,16 @@ """ Standardized layers implemented in keras. 
""" -import tensorflow -from tensorflow.keras import layers, activations -from tensorflow.keras.initializers import RandomNormal + +from tensorflow.keras import layers try: from tensorflow_addons.layers import InstanceNormalization -except: +except Exception: import warnings - InstanceNormalization = layers.Layer() + InstanceNormalization = layers.Layer warnings.warn( "DeepTrack not installed with tensorflow addons. Instance normalization will not work. Consider upgrading to tensorflow >= 2.0.", ImportWarning, @@ -37,10 +36,12 @@ def as_block(x): def _as_activation(x): if x is None: return layers.Layer() - if isinstance(x, layers.Layer): + elif isinstance(x, str): + return layers.Activation(x) + elif isinstance(x, layers.Layer): return x else: - return layers.Activation(x) + return layers.Layers(x) def _single_layer_call(x, layer, instance_norm, activation): @@ -225,6 +226,7 @@ def StaticUpsampleBlock( kernel_size=(1, 1), strides=1, padding="same", + with_conv=True, **kwargs ): """A single no-trainable 2d deconvolutional layer. @@ -247,21 +249,24 @@ def StaticUpsampleBlock( def Layer(filters, **kwargs_inner): kwargs_inner.update(kwargs) - layer = layers.UpSampling2D( - size=size, interpolation=interpolation, **kwargs_inner - ) + layer = layers.UpSampling2D(size=size, interpolation=interpolation) + conv = layers.Conv2D( filters, kernel_size=kernel_size, strides=strides, padding=padding, + **kwargs_inner ) def call(x): y = layer(x) - return _single_layer_call( - y, conv, _instance_norm(instance_norm, filters), activation - ) + if with_conv: + return _single_layer_call( + y, conv, _instance_norm(instance_norm, filters), activation + ) + else: + return layer(x) return call @@ -359,4 +364,4 @@ def Layer(filters, **kwargs_inner): "deconvolutional": DeconvolutionalBlock(), "none": Identity(), "identity": Identity(), -} \ No newline at end of file +} diff --git a/deeptrack/losses.py b/deeptrack/losses.py index 2a6117af0..ced7f5ddd 100644 --- a/deeptrack/losses.py +++ b/deeptrack/losses.py @@ -2,7 +2,7 @@ Functions --------- -flatten +flatten Flattends the inputs before calling the loss function. sigmoid Adds a signmoid transformation to the prediction before calling the loss function. @@ -117,4 +117,4 @@ def unet_crossentropy(T, P): # Wrap standard keras loss function with flatten. for keras_loss_function in _COMPATIBLE_LOSS_FUNCTIONS: deeptrack_loss_function = flatten(keras_loss_function) - globals()[deeptrack_loss_function.__name__] = deeptrack_loss_function \ No newline at end of file + globals()[deeptrack_loss_function.__name__] = deeptrack_loss_function diff --git a/deeptrack/math.py b/deeptrack/math.py index dfcf2fd5f..bb24d453e 100644 --- a/deeptrack/math.py +++ b/deeptrack/math.py @@ -8,9 +8,13 @@ Min-max image normalization. """ -from deeptrack.features import Feature -from deeptrack.image import Image +from .features import Feature +from .image import Image +from . 
import utils import numpy as np +import skimage +import skimage.measure +import scipy.ndimage as ndimage class Add(Feature): @@ -164,16 +168,10 @@ def get(self, image, min, max, **kwargs): return image -import deeptrack.utils as utils -import skimage - -import scipy.ndimage as ndimage - - class Blur(Feature): def __init__(self, filter_function, mode="reflect", **kwargs): self.filter = filter_function - super().__init__(borderType=borderType, **kwargs) + super().__init__(borderType=mode, **kwargs) def get(self, image, **kwargs): kwargs.pop("input", False) @@ -203,7 +201,7 @@ def get(self, input, ksize, **kwargs): weights = np.ones(ksize) / np.prod(ksize) - return safe_call(ndimage, input=input, weights=weights, **kwargs) + return utils.safe_call(ndimage, input=input, weights=weights, **kwargs) class GaussianBlur(Blur): @@ -231,12 +229,10 @@ class MedianBlur(Blur): """ def __init__(self, ksize=3, **kwargs): - super().__init__(ndimage.median_filter, k=k, **kwargs) + super().__init__(ndimage.median_filter, k=ksize, **kwargs) -## POOLING - -import skimage.measure +# POOLING class Pool(Feature): @@ -291,7 +287,7 @@ def __init__(self, ksize=3, **kwargs): super().__init__(np.mean, ksize=ksize, **kwargs) -### OPENCV2 blur +# OPENCV2 blur try: import cv2 @@ -363,4 +359,4 @@ def __init__(self, d=3, sigma_color=50, sigma_space=50, **kwargs): sigma_color=sigma_color, sigma_space=sigma_space, **kwargs - ) \ No newline at end of file + ) diff --git a/deeptrack/models.py b/deeptrack/models.py index 03f2dcee0..5c7df96a9 100644 --- a/deeptrack/models.py +++ b/deeptrack/models.py @@ -2,7 +2,7 @@ Classes ------- -ModelFeature +ModelFeature Base model feature class. Convolutional, convolutional Creates and compiles a convolutional neural network. @@ -12,12 +12,13 @@ Creates and compiles a recurrent neural network. 
""" -from deeptrack.losses import nd_mean_absolute_error -from deeptrack.features import Feature -from deeptrack.layers import as_block -from tensorflow.keras import models, layers, optimizers +import tensorflow +from .losses import nd_mean_absolute_error +from .features import Feature +from .layers import as_block +from tensorflow.keras import models, layers, backend as K +import tensorflow as tf import numpy as np -import warnings def _compile( @@ -143,7 +144,7 @@ def FullyConnected( dense_block = as_block(dense_block) - ### INITIALIZE DEEP LEARNING NETWORK + # INITIALIZE DEEP LEARNING NETWORK input_layer = layers.Input(shape=input_shape) layer = input_layer @@ -155,7 +156,7 @@ def FullyConnected( range(len(dense_layers_dimensions)), dense_layers_dimensions ): - if dense_layer_number is 0 and not flatten_input: + if dense_layer_number == 0 and not flatten_input: layer = dense_block(dense_layer_dimension, input_shape=input_shape)(layer) else: layer = dense_block(dense_layer_dimension)(layer) @@ -184,6 +185,7 @@ def Convolutional( output_activation=None, output_kernel_size=3, loss=nd_mean_absolute_error, + input_layer=None, convolution_block="convolutional", pooling_block="pooling", dense_block="dense", @@ -232,6 +234,9 @@ def Convolutional( layer = inputs + if input_layer: + layer = input_layer(layer) + ### CONVOLUTIONAL BASIS for conv_layer_dimension in conv_layers_dimensions: @@ -284,6 +289,7 @@ def UNet( output_kernel_size=3, output_activation=None, loss=nd_mean_absolute_error, + input_layer=None, encoder_convolution_block="convolutional", base_convolution_block="convolutional", decoder_convolution_block="convolutional", @@ -340,6 +346,9 @@ def UNet( layer = unet_input + if input_layer: + layer = input_layer(layer) + # Downsampling path for conv_layer_dimension in conv_layers_dimensions: for _ in range(steps_per_pooling): @@ -423,10 +432,10 @@ def RNN( Deep learning network. 
""" - ### INITIALIZE DEEP LEARNING NETWORK + # INITIALIZE DEEP LEARNING NETWORK network = models.Sequential() - ### CONVOLUTIONAL BASIS + # CONVOLUTIONAL BASIS for conv_layer_number, conv_layer_dimension in zip( range(len(conv_layers_dimensions)), conv_layers_dimensions ): @@ -506,7 +515,7 @@ def RNN( rnn = RNN -class cgan(Model): +class cgan(tf.keras.Model): def __init__( self, generator=None, @@ -517,8 +526,10 @@ def __init__( assemble_loss=None, assemble_optimizer=None, assemble_loss_weights=None, + metrics=[], **kwargs ): + super().__init__() # Build and compile the discriminator self.discriminator = discriminator @@ -545,79 +556,73 @@ def __init__( # The assembled model (stacked generator and discriminator) # Trains the generator to fool the discriminator - self.assemble = models.Model(self.model_input, [validity, img]) + self.assemble = tf.keras.models.Model(self.model_input, [validity, img]) + + self.num_losses = len(assemble_loss) + self.assemble.compile( loss=assemble_loss, optimizer=assemble_optimizer, loss_weights=assemble_loss_weights, + metrics=metrics, + ) + self._metrics = [tf.metrics.get(m) for m in metrics] + + def train_step(self, data): + + # Compute data and labels + batch_x, batch_y = data + gen_imgs = self.generator(batch_x) + + # Train the discriminator + with tf.GradientTape() as tape: + # Train in two steps + disc_pred_1 = self.discriminator([batch_y, batch_x]) + disc_pred_2 = self.discriminator([gen_imgs, batch_x]) + shape = tf.shape(disc_pred_1) + valid, fake = tf.ones(shape), tf.zeros(shape) + d_loss = ( + self.discriminator.compiled_loss(disc_pred_1, valid) + + self.discriminator.compiled_loss(disc_pred_2, fake) + ) / 2 + + # Compute gradient and apply gradient + grads = tape.gradient(d_loss, self.discriminator.trainable_weights) + self.discriminator.optimizer.apply_gradients( + zip(grads, self.discriminator.trainable_weights) ) - super().__init__(self.generator, **kwargs) + # Train the assembly - def fit(self, data_generator, epochs, steps_per_epoch=None, **kwargs): - for key in kwargs.keys(): - warnings.warn( - "{0} not implemented for cgan. 
Does not affect the execution.".format( - key - ) - ) - for epoch in range(epochs): - if not steps_per_epoch: - try: - steps = len(data_generator) - except: - steps = 1 - else: - steps = steps_per_epoch - - d_loss = 0 - g_loss = 0 - - for step in range(steps): - # update data - try: - data, labels = next(data_generator) - except: - data, labels = data_generator[step] - - # Grab disriminator labels - shape = (data.shape[0], *self.discriminator.output.shape[1:]) - valid, fake = np.ones(shape), np.zeros(shape) - - # --------------------- - # Train Discriminator - # --------------------- - - # Generate a batch of new images - gen_imgs = self.generator(data) - - # Train the discriminator - d_loss_real = self.discriminator.train_on_batch([labels, data], valid) - d_loss_fake = self.discriminator.train_on_batch([gen_imgs, data], fake) - d_loss += 0.5 * np.add(d_loss_real, d_loss_fake) - - # --------------------- - # Train Generator - # --------------------- - - # Train the generator (to have the discriminator label samples as valid) - g_loss += np.array(self.assemble.train_on_batch(data, [valid, labels])) - - # Plot the progress - - try: - data_generator.on_epoch_end() - except: - pass - - print( - "%d [D loss: %f, acc.: %.2f%%] [G loss: %f, %f, %f]" - % ( - epoch, - d_loss[0] / steps, - 100 * d_loss[1] / steps, - g_loss[0] / steps, - g_loss[1] / steps, - g_loss[2] / steps, - ) + with tf.GradientTape() as tape: + assemble_output = self.assemble(batch_x) + + generated_image_copies = [assemble_output[1]] * (self.num_losses - 1) + + batch_y_copies = [batch_y] * (self.num_losses - 1) + + g_loss = self.assemble.compiled_loss( + [assemble_output[0], *generated_image_copies], + [valid, *batch_y_copies], ) + + # Compute gradient and apply gradient + grads = tape.gradient(g_loss, self.assemble.trainable_weights) + self.assemble.optimizer.apply_gradients( + zip(grads, self.assemble.trainable_weights) + ) + + # Update the metrics + self.compiled_metrics.update_state(assemble_output[1], batch_y) + + # Define output + loss = { + "d_loss": d_loss, + "g_loss": g_loss, + **{m.name: m.result() for m in self.metrics}, + } + + return loss + + def call(self, *args, **kwargs): + return self.generator.call(*args, **kwargs) diff --git a/deeptrack/noises.py b/deeptrack/noises.py index d53541a27..6801b3f5d 100644 --- a/deeptrack/noises.py +++ b/deeptrack/noises.py @@ -13,8 +13,8 @@ """ import numpy as np -from deeptrack.features import Feature -from deeptrack.image import Image +from .features import Feature +from .image import Image class Noise(Feature): diff --git a/deeptrack/optics.py b/deeptrack/optics.py index f4e970533..fac9fd15f 100644 --- a/deeptrack/optics.py +++ b/deeptrack/optics.py @@ -16,10 +16,9 @@ """ import numpy as np -from deeptrack.features import Feature, StructuralFeature -from deeptrack.image import Image, pad_image_to_fft +from .features import Feature, StructuralFeature +from .image import Image, pad_image_to_fft -from scipy.interpolate import RectBivariateSpline from scipy.ndimage import convolve @@ -78,7 +77,13 @@ def get(self, image, sample, objective, **kwargs): if upscale > 1: mean_imaged_sample = np.reshape( imaged_sample, - (shape[0] // upscale, upscale, shape[1] // upscale, upscale, shape[2]), + ( + shape[0] // upscale, + upscale, + shape[1] // upscale, + upscale, + shape[2], + ), ).mean(axis=(3, 1)) imaged_sample = Image(mean_imaged_sample).merge_properties_from( @@ -97,7 +102,10 @@ def get(self, image, sample, objective, **kwargs): def _update(self, **kwargs): 
self.properties["sample"].update( - **{**kwargs, **self.objective.update(**kwargs).current_value.properties} + **{ + **kwargs, + **self.objective.update(**kwargs).current_value.properties, + } ) super()._update(**kwargs) @@ -282,11 +290,15 @@ def _pad_volume( new_limits[i, :] = ( np.min([new_limits[i, 0], upscaled_output_region[i] - padding[1]]), np.max( - [new_limits[i, 1], upscaled_output_region[i + 2] + padding[i + 2]] + [ + new_limits[i, 1], + upscaled_output_region[i + 2] + padding[i + 2], + ] ), ) new_volume = np.zeros( - np.diff(new_limits, axis=1)[:, 0].astype(np.int32), dtype=np.complex + np.diff(new_limits, axis=1)[:, 0].astype(np.int32), + dtype=np.complex, ) old_region = (limits - new_limits).astype(np.int32) @@ -370,7 +382,9 @@ def get(self, illuminated_volume, limits, **kwargs): ) padded_volume = padded_volume[ - output_region[0] : output_region[2], output_region[1] : output_region[3], : + output_region[0] : output_region[2], + output_region[1] : output_region[3], + :, ] z_limits = limits[2, :] @@ -380,7 +394,10 @@ def get(self, illuminated_volume, limits, **kwargs): # Get planes in volume where not all values are 0. z_iterator = np.linspace( - z_limits[0], z_limits[1], num=padded_volume.shape[2], endpoint=False + z_limits[0], + z_limits[1], + num=padded_volume.shape[2], + endpoint=False, ) zero_plane = np.all(padded_volume == 0, axis=(0, 1), keepdims=False) z_values = z_iterator[~zero_plane] @@ -496,7 +513,9 @@ def get(self, illuminated_volume, limits, fields, **kwargs): ) padded_volume = padded_volume[ - output_region[0] : output_region[2], output_region[1] : output_region[3], : + output_region[0] : output_region[2], + output_region[1] : output_region[3], + :, ] z_limits = limits[2, :] @@ -504,7 +523,10 @@ def get(self, illuminated_volume, limits, fields, **kwargs): index_iterator = range(padded_volume.shape[2]) z_iterator = np.linspace( - z_limits[0], z_limits[1], num=padded_volume.shape[2], endpoint=False + z_limits[0], + z_limits[1], + num=padded_volume.shape[2], + endpoint=False, ) zero_plane = np.all(padded_volume == 0, axis=(0, 1), keepdims=False) @@ -766,7 +788,12 @@ def _create_volume( continue padded_scatterer = Image( - np.pad(scatterer, [(2, 2), (2, 2), (2, 2)], "constant", constant_values=0) + np.pad( + scatterer, + [(2, 2), (2, 2), (2, 2)], + "constant", + constant_values=0, + ) ) padded_scatterer.properties = scatterer.properties scatterer = padded_scatterer @@ -815,7 +842,8 @@ def _create_volume( if not (np.array(new_limits) == np.array(limits)).all(): new_volume = np.zeros( - np.diff(new_limits, axis=1)[:, 0].astype(np.int32), dtype=np.complex + np.diff(new_limits, axis=1)[:, 0].astype(np.int32), + dtype=np.complex, ) old_region = (limits - new_limits).astype(np.int32) limits = limits.astype(np.int32) diff --git a/deeptrack/properties.py b/deeptrack/properties.py index 300308c90..4ce380389 100644 --- a/deeptrack/properties.py +++ b/deeptrack/properties.py @@ -18,9 +18,16 @@ """ import numpy as np -from deeptrack.utils import isiterable, hasmethod, get_kwarg_names, kwarg_has_default -import deeptrack +from .utils import ( + isiterable, + get_kwarg_names, + kwarg_has_default, +) + +from . import features + import copy +import collections class Property: @@ -72,11 +79,11 @@ def current_value(self): @current_value.setter def current_value(self, updated_current_value): self._current_value = updated_current_value - if id(self) not in deeptrack.UPDATE_MEMO["memoization"]: + if id(self) not in features.UPDATE_MEMO["memoization"]: # Some values work, some don't. 
self, updated_current_value and self._current_value work # Best guess is an error in the gc reference counter causing it to dereference # But then again, I don't think it should be the same reference anyway - deeptrack.UPDATE_MEMO["memoization"][id(self)] = updated_current_value + features.UPDATE_MEMO["memoization"][id(self)] = updated_current_value @current_value.getter def current_value(self): @@ -109,15 +116,15 @@ def update(self, **kwargs) -> "Property": # a = 1+1 if ( - deeptrack.UPDATE_LOCK.locked() - and my_id in deeptrack.UPDATE_MEMO["memoization"] + features.UPDATE_LOCK.locked() + and my_id in features.UPDATE_MEMO["memoization"] ): return self if self.parent: kwargs.update(self.parent) - kwargs.update(deeptrack.UPDATE_MEMO["user_arguments"]) + kwargs.update(features.UPDATE_MEMO["user_arguments"]) self.current_value = self.sample(self.sampling_rule, **kwargs) return self @@ -155,7 +162,7 @@ def sample(self, sampling_rule, **kwargs): """ - if isinstance(sampling_rule, deeptrack.Feature): + if isinstance(sampling_rule, features.Feature): # I am worried passing kwargs may lead to name clash sampling_rule._update(**kwargs) return sampling_rule @@ -229,7 +236,7 @@ def sample(self, sampling_rule, **kwargs): return sampling_rule def __deepcopy__(self, memo): - is_in = id(self) in deeptrack.UPDATE_MEMO["memoization"] + is_in = id(self) in features.UPDATE_MEMO["memoization"] if is_in: return self else: @@ -309,12 +316,12 @@ def update(self, sequence_length=0, **kwargs): """ my_id = id(self) if ( - deeptrack.UPDATE_LOCK.locked() - and my_id in deeptrack.UPDATE_MEMO["memoization"] + features.UPDATE_LOCK.locked() + and my_id in features.UPDATE_MEMO["memoization"] ): return self - kwargs.update(deeptrack.UPDATE_MEMO["user_arguments"]) + kwargs.update(features.UPDATE_MEMO["user_arguments"]) new_current_value = [] @@ -338,13 +345,10 @@ def update(self, sequence_length=0, **kwargs): new_current_value.append(next_value) self.current_value = new_current_value - deeptrack.UPDATE_MEMO["memoization"][my_id] = new_current_value + features.UPDATE_MEMO["memoization"][my_id] = new_current_value return self -import collections - - class PropertyDict(collections.OrderedDict): """Dictionary with Property elements @@ -382,7 +386,7 @@ def current_value_dict(self, **kwargs) -> dict: # of the current timestep if isinstance(property, SequentialProperty): sequence_step = kwargs.get("sequence_step", None) - if not sequence_step is None: + if sequence_step is not None: property_value = property_value[sequence_step] current_value_dict[key] = property_value @@ -402,11 +406,11 @@ def update(self, **kwargs) -> "PropertyDict": """ property_arguments = collections.OrderedDict(self) property_arguments.update(kwargs) - property_arguments.update(deeptrack.UPDATE_MEMO["user_arguments"]) + property_arguments.update(features.UPDATE_MEMO["user_arguments"]) for key, prop in self.items(): if isinstance(property_arguments[key], Property): prop.update(**property_arguments) - elif id(prop) not in deeptrack.UPDATE_MEMO["memoization"]: + elif id(prop) not in features.UPDATE_MEMO["memoization"]: prop.current_value = property_arguments[key] return self @@ -447,4 +451,4 @@ def sample(self, **kwargs) -> dict: for key, property in self.items(): sample_dict[key] = property.sample(**kwargs) - return sample_dict \ No newline at end of file + return sample_dict diff --git a/deeptrack/scatterers.py b/deeptrack/scatterers.py index de9063b9a..b28958985 100644 --- a/deeptrack/scatterers.py +++ b/deeptrack/scatterers.py @@ -1,6 +1,6 @@ 
"""Implementations of Feature the model scattering objects. -Provides some basic implementations of scattering objects +Provides some basic implementations of scattering objects that are frequently used. Classes @@ -18,14 +18,13 @@ """ -from threading import Lock import numpy as np -# from scipy.special import jv as jn, spherical_jn as jv, h1vp, eval_legendre as leg, jvp -import deeptrack.backend as D -from deeptrack.features import Feature, MERGE_STRATEGY_APPEND -from deeptrack.image import Image -import deeptrack.image +from . import backend as D +from .features import Feature, MERGE_STRATEGY_APPEND +from .image import Image +from . import image +import warnings class Scatterer(Feature): @@ -60,8 +59,8 @@ class Scatterer(Feature): Other Parameters ---------------- upsample_axes : tuple of ints - Sets the axes along which the calculation is upsampled (default is None, - which implies all axes are upsampled). + Sets the axes along which the calculation is upsampled (default is + None, which implies all axes are upsampled). crop_zeros : bool Whether to remove slices in which all elements are zero. """ @@ -112,10 +111,11 @@ def _process_and_get( # Post processes the created object to handle upsampling, # as well as cropping empty slices. if not self._processed_properties: - import warnings warnings.warn( - "Overridden _process_properties method does not call super. This is likely to result in errors if used with Optics.upscale != 1." + "Overridden _process_properties method does not call super. " + + "This is likely to result in errors if used with " + + "Optics.upscale != 1." ) # Calculates upsampled voxel_size @@ -134,7 +134,10 @@ def _process_and_get( if new_image.size == 0: warnings.warn( - "Scatterer created that is smaller than a pixel. This may yield inconsistent results. Consider using upsample on the scatterer, or upscale on the optics.", + "Scatterer created that is smaller than a pixel. " + + "This may yield inconsistent results." + + " Consider using upsample on the scatterer," + + " or upscale on the optics.", Warning, ) @@ -346,7 +349,8 @@ def _process_properties(self, propertydict): length 3. If the radius is a single value, the particle is made a sphere - If the radius are two values, the smallest value is appended as the third value + If the radius are two values, the smallest value is appended as the + third value The rotation vector is padded with zeros until it is of length 3 """ @@ -426,12 +430,13 @@ def get(self, image, radius, rotation, voxel_size, **kwargs): class MieScatterer(Scatterer): """Base implementation of a Mie particle. - New Mie-theory scatterers can be implemented by extending this class, and passing - a function that calculates the coefficients of the harmonics up to order `L`. To be - precise, the feature expects a wrapper function that takes the current values of the - properties, as well as a inner function that takes an integer as the only parameter, - and calculates the coefficients up to that integer. The return format is expected to - be a tuple with two values, corresponding to `an` and `bn`. See + New Mie-theory scatterers can be implemented by extending this class, and + passing a function that calculates the coefficients of the harmonics up to + order `L`. To beprecise, the feature expects a wrapper function that takes + the current values of the properties, as well as a inner function that + takes an integer as the only parameter, and calculates the coefficients up + to that integer. 
The return format is expected to be a tuple with two + values, corresponding to `an` and `bn`. See `deeptrack.backend.mie_coefficients` for an example. Parameters @@ -439,11 +444,13 @@ class MieScatterer(Scatterer): coefficients : Callable[int] -> Tuple[ndarray, ndarray] Function that returns the harmonics coefficients. offset_z : "auto" or float - Distance from the particle in the z direction the field is evaluated. If "auto", - this is calculated from the pixel size and `collection_angle` + Distance from the particle in the z direction the field is evaluated. + If "auto", this is calculated from the pixel size and + `collection_angle` collection_angle : "auto" or float The maximum collection angle in radians. If "auto", this - is calculated from the objective NA (which is true if the objective is the limiting + is calculated from the objective NA (which is true if the objective is + the limiting aperature). polarization_angle : float Angle of the polarization of the incoming light relative to the x-axis. @@ -507,7 +514,7 @@ def _process_properties(self, properties): def get( self, - image, + inp, position, upscaled_output_region, voxel_size, @@ -535,7 +542,7 @@ def get( - upscaled_output_region[1] + padding[1] ) - arr = deeptrack.image.pad_image_to_fft(np.zeros((xSize, ySize))) + arr = image.pad_image_to_fft(np.zeros((xSize, ySize))) # Evluation grid x = np.arange(-padding[0], arr.shape[0] - padding[0]) - (position[1]) * upscale @@ -561,11 +568,11 @@ def get( PI, TAU = D.mie_harmonics(ct, L) # Normalization factor - E = [(2 * l + 1) / (l * (l + 1)) for l in range(1, L + 1)] + E = [(2 * i + 1) / (i * (i + 1)) for i in range(1, L + 1)] # Scattering terms - S1 = sum([E[l] * A[l] * TAU[l] + E[l] * B[l] * PI[l] for l in range(0, L)]) - S2 = sum([E[l] * B[l] * TAU[l] + E[l] * A[l] * PI[l] for l in range(0, L)]) + S1 = sum([E[i] * A[i] * TAU[i] + E[i] * B[i] * PI[i] for i in range(0, L)]) + S2 = sum([E[i] * B[i] * TAU[i] + E[i] * A[i] * PI[i] for i in range(0, L)]) field = ( (ct > ct_max) @@ -581,11 +588,13 @@ def get( class MieSphere(MieScatterer): """Scattered field by a sphere - Should be calculated on at least a 64 by 64 grid. Use padding in the optics if necessary + Should be calculated on at least a 64 by 64 grid. Use padding in the + optics if necessary. - Calculates the scattered field by a spherical particle in a homogenous medium, - as predicted by Mie theory. Note that the induced phase shift is calculated - in comparison to the `refractive_index_medium` property of the optical device. + Calculates the scattered field by a spherical particle in a homogenous + medium, as predicted by Mie theory. Note that the induced phase shift is + calculated in comparison to the `refractive_index_medium` property of the + optical device. Parameters ---------- @@ -604,12 +613,13 @@ class MieSphere(MieScatterer): The position in the direction normal to the camera plane. Used if `position` is of length 2. offset_z : "auto" or float - Distance from the particle in the z direction the field is evaluated. If "auto", - this is calculated from the pixel size and `collection_angle` + Distance from the particle in the z direction the field is evaluated. + If "auto", this is calculated from the pixel size and + `collection_angle` collection_angle : "auto" or float The maximum collection angle in radians. If "auto", this - is calculated from the objective NA (which is true if the objective is the limiting - aperature). 
+ is calculated from the objective NA (which is true if the objective + is the limiting aperature). polarization_angle : float Angle of the polarization of the incoming light relative to the x-axis. """ @@ -649,13 +659,15 @@ def inner(L): class MieStratifiedSphere(MieScatterer): """Scattered field by a stratified sphere - A stratified sphere is a sphere with several concentric shells of uniform refractive index. + A stratified sphere is a sphere with several concentric shells of uniform + refractive index. - Should be calculated on at least a 64 by 64 grid. Use padding in the optics if necessary + Should be calculated on at least a 64 by 64 grid. Use padding in the + optics if necessary - Calculates the scattered field by in a homogenous medium, as predicted by Mie theory. - Note that the induced phase shift is calculated in comparison to the - `refractive_index_medium` property of the optical device. + Calculates the scattered field by in a homogenous medium, as predicted by + Mie theory. Note that the induced phase shift is calculated in comparison + to the `refractive_index_medium` property of the optical device. Parameters ---------- @@ -674,12 +686,13 @@ class MieStratifiedSphere(MieScatterer): The position in the direction normal to the camera plane. Used if `position` is of length 2. offset_z : "auto" or float - Distance from the particle in the z direction the field is evaluated. If "auto", - this is calculated from the pixel size and `collection_angle` + Distance from the particle in the z direction the field is evaluated. + If "auto", this is calculated from the pixel size and + `collection_angle` collection_angle : "auto" or float The maximum collection angle in radians. If "auto", this - is calculated from the objective NA (which is true if the objective is the limiting - aperature). + is calculated from the objective NA (which is true if the objective + is the limiting aperature). polarization_angle : float Angle of the polarization of the incoming light relative to the x-axis. """ diff --git a/deeptrack/sequences.py b/deeptrack/sequences.py index 8858d2c26..462284138 100644 --- a/deeptrack/sequences.py +++ b/deeptrack/sequences.py @@ -11,8 +11,8 @@ Converts a feature to be resolved as a sequence. """ -from deeptrack.features import Feature -from deeptrack.properties import SequentialProperty +from .features import Feature +from .properties import SequentialProperty class Sequence(Feature): @@ -67,11 +67,13 @@ def update(self, **kwargs): def Sequential(feature: Feature, **kwargs): """Converts a feature to be resolved as a sequence. - Should be called on individual features, not combinations of features. All keyword - arguments will be trated as sequential properties and will be passed to the parent feature. + Should be called on individual features, not combinations of features. All + keyword arguments will be trated as sequential properties and will be + passed to the parent feature. - If a property from the keyword argument already exists on the feature, the existing property - will be used to initilize the passed property (that is, it will be used for the first timestep). + If a property from the keyword argument already exists on the feature, the + existing property will be used to initilize the passed property (that is, + it will be used for the first timestep). 
Parameters ---------- diff --git a/deeptrack/utils.py b/deeptrack/utils.py index c35d3f077..9ced9f335 100644 --- a/deeptrack/utils.py +++ b/deeptrack/utils.py @@ -11,7 +11,7 @@ isiterable(obj: any) Return True if the object is iterable. Else, return False. as_list(obj: any) - If the input is iterable, convert it to list. + If the input is iterable, convert it to list. Otherwise, wrap the input in a list. get_kwarg_names(function: Callable) Return the names of the keyword arguments the function accepts. @@ -142,8 +142,8 @@ def kwarg_has_default(function: Callable, argument: str) -> bool: def safe_call(function, positional_args=[], **kwargs): """Calls a function, using keyword arguments from a dictionary of arguments. - If the function does not accept one of the argument provided, it will not be passed. Does not support - non-keyword arguments. + If the function does not accept one of the argument provided, it will not + be passed. Does not support non-keyword arguments. Parameters ---------- diff --git a/docs/_sources/augmentations.rst.txt b/docs/_sources/augmentations.rst.txt index e37c74a9d..6d007ab44 100644 --- a/docs/_sources/augmentations.rst.txt +++ b/docs/_sources/augmentations.rst.txt @@ -69,6 +69,13 @@ Pad :members: :exclude-members: get +PadToMultiplesOf +^^^^^^^^^^^^^^^^ + +.. autoclass:: deeptrack.augmentations.PadToMultiplesOf + :members: + :exclude-members: get + PreLoad ^^^^^^^ diff --git a/docs/_sources/features.rst.txt b/docs/_sources/features.rst.txt index d989b39f1..bc4a0de55 100644 --- a/docs/_sources/features.rst.txt +++ b/docs/_sources/features.rst.txt @@ -6,6 +6,34 @@ features Module classes <<<<<<<<<<<<<< +AsType +^^^^^^ + +.. autoclass:: deeptrack.features.AsType + :members: + :exclude-members: get + +Bind +^^^^ + +.. autoclass:: deeptrack.features.Bind + :members: + :exclude-members: get + +BindResolve +^^^^^^^^^^^ + +.. autoclass:: deeptrack.features.BindResolve + :members: + :exclude-members: get + +BindUpdate +^^^^^^^^^^ + +.. autoclass:: deeptrack.features.BindUpdate + :members: + :exclude-members: get + Branch ^^^^^^ diff --git a/docs/augmentations.html b/docs/augmentations.html index 879a1f7eb..ade943ad9 100644 --- a/docs/augmentations.html +++ b/docs/augmentations.html @@ -264,6 +264,10 @@ Pad +
[Regenerated Sphinx HTML output omitted: docs/augmentations.html gains the rendered entry and method table for deeptrack.augmentations.PadToMultiplesOf(multiple=1, **kwargs) ("Pad images until their height/width is a multiple of a value"; multiple may be an int or a tuple of int/None, where None or -1 skips that axis), and docs/features.html gains the corresponding entries for AsType, Bind, BindResolve, and BindUpdate, mirroring the .rst sources above.]
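
For reviewers who want to try the additions, below is a minimal usage sketch of the new feature classes. It assumes only what the patch itself declares (AsType and Bind importable from deeptrack.features, the usual update()/resolve() workflow, and Bind forwarding its keyword properties to the child feature at resolve time, as its docstring states); the arrays and dtype values are illustrative, not part of the patch.

import numpy as np
from deeptrack.features import AsType, Bind

# AsType casts the resolved image to the requested numpy dtype string.
caster = AsType(dtype="uint8")
caster.update()
cast_image = caster.resolve(np.array([[0.2, 1.7], [250.0, 3.9]]))
print(cast_image.dtype)  # uint8

# Bind forwards its extra keyword properties to the child feature when it is
# resolved, so the child's dtype="float32" default is overridden here.
bound = Bind(AsType(dtype="float32"), dtype="int16")
bound.update()
bound_image = bound.resolve(np.zeros((4, 4)))
print(bound_image.dtype)  # int16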
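
The padding rule used by PadToMultiplesOf can also be checked in isolation. The helper below is a hypothetical standalone restatement of that rule in plain numpy (it is not the library API), useful for sanity-checking expected output shapes; inside a pipeline the same effect comes from the PadToMultiplesOf feature added above.

import numpy as np

def pad_to_multiples_of(array, multiple):
    # Each axis grows by (-dim % multiple) pixels, split between the two sides,
    # mirroring the amount_to_pad closure in the patch.
    pad_width = []
    for dim in array.shape:
        extra = -dim % multiple            # e.g. -90 % 32 == 6
        pad_width.append((extra // 2, extra - extra // 2))
    return np.pad(array, pad_width, mode="constant")

print(pad_to_multiples_of(np.zeros((90, 70)), 32).shape)  # (96, 96)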