diff --git a/deeptrack/extras/datasets.py b/deeptrack/extras/datasets.py
index 031c3d772..df5b06a73 100644
--- a/deeptrack/extras/datasets.py
+++ b/deeptrack/extras/datasets.py
@@ -46,7 +46,12 @@
     "CellData": ("1CJW7msDiI7xq7oMce4l9tRkNN6O5eKtj", "CellData", ""),
     "CellMigData": ("1vRsWcxjbTz6rffCkrwOfs_ezPvUjPwGw", "CellMigData", ""),
     "BFC2Cells": ("1lHgJdG5I3vRnU_DRFwTr_c69nx1Xkd3X", "BFC2Cells", ""),
-    "STrajCh": ("1wXCSzvHuLwz1dywxUu2aQXlqbgf2V8r3", "STrajCh", "")
+    "STrajCh": ("1wXCSzvHuLwz1dywxUu2aQXlqbgf2V8r3", "STrajCh", ""),
+    "TrajectoryDiffusion": (
+        "1YhECLQrWPZgc_TVY2Sl2OwDcNxmA_jR5",
+        "TrajectoryDiffusion",
+        "",
+    ),
 }
 
 
@@ -109,7 +114,9 @@ def load(key):
 
     # If the extracted folder is another folder with the same name, move it.
     if os.path.isdir(f"datasets/{folder_name}/{folder_name}"):
-        os.rename(f"datasets/{folder_name}/{folder_name}", f"datasets/{folder_name}")
+        os.rename(
+            f"datasets/{folder_name}/{folder_name}", f"datasets/{folder_name}"
+        )
 
 
 def load_model(key):
@@ -171,7 +178,9 @@ def load_model(key):
 
     # If the extracted folder is another folder with the same name, move it.
     if os.path.isdir(f"models/{folder_name}/{folder_name}"):
-        os.rename(f"models/{folder_name}/{folder_name}", f"models/{folder_name}")
+        os.rename(
+            f"models/{folder_name}/{folder_name}", f"models/{folder_name}"
+        )
 
     return f"models/{folder_name}"
 
diff --git a/deeptrack/models/convolutional.py b/deeptrack/models/convolutional.py
index 8536b4e3d..21e7a3b67 100644
--- a/deeptrack/models/convolutional.py
+++ b/deeptrack/models/convolutional.py
@@ -244,6 +244,123 @@ def __init__(
         super().__init__(model, **kwargs)
 
 
+class TimeDistributedFullyConvolutional(KerasModel):
+    """A fully convolutional neural network.
+
+    Parameters
+    ----------
+    input_shape : tuple
+        The shape of the input.
+    conv_layers_dimensions : tuple of int or tuple of tuple of int
+        The number of filters in each convolutional layer. Examples:
+        - (32, 64, 128) results in
+            1. Conv2D(32, 3, activation='relu', padding='same')
+            2. MaxPooling2D()
+            3. Conv2D(64, 3, activation='relu', padding='same')
+            4. MaxPooling2D()
+            5. Conv2D(128, 3, activation='relu', padding='same')
+            6. MaxPooling2D()
+            7. Conv2D(number_of_outputs, 3, activation=output_activation, padding='same')
+
+        - ((32, 32), (64, 64), (128, 128)) results in
+            1. Conv2D(32, 3, activation='relu', padding='same')
+            2. Conv2D(32, 3, activation='relu', padding='same')
+            3. MaxPooling2D()
+            4. Conv2D(64, 3, activation='relu', padding='same')
+            5. Conv2D(64, 3, activation='relu', padding='same')
+            6. MaxPooling2D()
+            7. Conv2D(128, 3, activation='relu', padding='same')
+            8. Conv2D(128, 3, activation='relu', padding='same')
+            9. MaxPooling2D()
+            10. Conv2D(number_of_outputs, 3, activation=output_activation, padding='same')
+    omit_last_pooling : bool
+        If True, the last MaxPooling2D layer is omitted. Default is False
+    number_of_outputs : int
+        The number of output channels.
+    output_activation : str
+        The activation function of the output layer.
+    output_kernel_size : int
+        The kernel size of the output layer.
+    return_sequences : bool
+        If True, the output of the last layer is flattened and returned as a
+        sequence with shape (batch_size, timesteps, flattened_output_dim).
+    conv_layers_kwargs : dict or None
+        Keyword arguments passed to the convolutional layers (None means {}).
+    flatten_block : tf.keras.layers.Layer or None
+        The layer used to flatten the output of the last convolutional layer
+        if return_sequences is True. If None (the default), a fresh Flatten
+        layer is created for this model.
+    Returns
+    -------
+    model : tf.keras.models.Model
+        The compiled model.
+    """
+
+    def __init__(
+        self,
+        input_shape,
+        conv_layers_dimensions,
+        omit_last_pooling=False,
+        number_of_outputs=1,
+        output_activation="sigmoid",
+        output_kernel_size=3,
+        return_sequences=False,
+        conv_layers_kwargs=None,
+        flatten_block: layers.Layer = None,
+        **kwargs,
+    ):
+
+        # INITIALIZE DEEP LEARNING NETWORK
+        if isinstance(input_shape, list):
+            network_input = [layers.Input(shape) for shape in input_shape]
+            inputs = layers.Concatenate(axis=-1)(network_input)
+        else:
+            network_input = layers.Input(input_shape)
+            inputs = network_input
+
+        layer = inputs
+
+        # CONVOLUTIONAL BASIS
+        convolutional_kwargs = {
+            "kernel_size": 3,
+            "activation": "relu",
+            "padding": "same",
+        }
+        convolutional_kwargs.update(conv_layers_kwargs or {})
+        for idx, depth_dimensions in enumerate(conv_layers_dimensions):
+
+            if isinstance(depth_dimensions, int):
+                depth_dimensions = (depth_dimensions,)
+
+            for conv_layer_dimension in depth_dimensions:
+                layer = layers.TimeDistributed(
+                    layers.Conv2D(conv_layer_dimension, **convolutional_kwargs)
+                )(layer)
+
+            # add pooling layer
+            if idx < len(conv_layers_dimensions) - 1 or not omit_last_pooling:
+                layer = layers.TimeDistributed(layers.MaxPooling2D(2, 2))(
+                    layer
+                )
+
+        # OUTPUT
+        if return_sequences:
+            output_layer = layers.TimeDistributed(flatten_block or layers.Flatten())(layer)
+        else:
+            output_layer = layers.TimeDistributed(
+                layers.Conv2D(
+                    number_of_outputs,
+                    kernel_size=output_kernel_size,
+                    activation=output_activation,
+                    padding="same",
+                    name="output",
+                )
+            )(layer)
+
+        model = models.Model(network_input, output_layer)
+
+        super().__init__(model, **kwargs)
+
+
 class UNet(KerasModel):
     """Creates and compiles a U-Net.