diff --git a/README.md b/README.md
index 5efa60a56..65648d397 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,12 @@
-
+
A comprehensive deep learning framework for digital microscopy.
-
+
@@ -54,8 +54,8 @@ DeepTrack is a general purpose deep learning framework for microscopy, meaning y
Training a CNN-based single particle tracker using simulated data
-
-
+
+
Unsupervised training of a single particle tracker using LodeSTAR
@@ -67,8 +67,8 @@ DeepTrack is a general purpose deep learning framework for microscopy, meaning y
-
-
+
+
Training LodeSTAR to detect multiple cells from a single image
Training a UNet-based multi-particle tracker using simulated data
@@ -144,6 +144,13 @@ https://doi.org/10.1063/5.0034891
See also:
+:
+```
+Midtvedt, B., Pineda, J., Skärberg, F. et al.
+"Single-shot self-supervised object detection in microscopy."
+Nat Commun 13, 7492 (2022).
+```
+
:
```
Jesús Pineda, Benjamin Midtvedt, Harshith Bachimanchi, Sergio Noé, Daniel Midtvedt, Giovanni Volpe, and Carlo Manzo
diff --git a/deeptrack/datasets/__init__.py b/deeptrack/datasets/__init__.py
index 9941d71cc..c50597321 100644
--- a/deeptrack/datasets/__init__.py
+++ b/deeptrack/datasets/__init__.py
@@ -4,4 +4,8 @@
regression_holography_nanoparticles,
segmentation_fluorescence_u2os,
detection_holography_nanoparticles,
+ detection_linking_hela,
+ dmdataset,
+ regression_diffusion_landscape,
+ endothelial_vs,
)
\ No newline at end of file
diff --git a/deeptrack/datasets/detection_linking_hela/__init__.py b/deeptrack/datasets/detection_linking_hela/__init__.py
new file mode 100644
index 000000000..fda9a004a
--- /dev/null
+++ b/deeptrack/datasets/detection_linking_hela/__init__.py
@@ -0,0 +1,3 @@
+"""detection_linking_hela dataset."""
+
+from .detection_linking_hela import DetectionLinkingHela
diff --git a/deeptrack/datasets/detection_linking_hela/checksums.tsv b/deeptrack/datasets/detection_linking_hela/checksums.tsv
new file mode 100644
index 000000000..bd2ff74b8
--- /dev/null
+++ b/deeptrack/datasets/detection_linking_hela/checksums.tsv
@@ -0,0 +1,3 @@
+# TODO(detection_linking_hela): If your dataset downloads files, then the checksums
+# will be automatically added here when running
+# `tfds build --register_checksums`.
diff --git a/deeptrack/datasets/detection_linking_hela/detection_linking_hela.py b/deeptrack/datasets/detection_linking_hela/detection_linking_hela.py
new file mode 100644
index 000000000..4f3ff8896
--- /dev/null
+++ b/deeptrack/datasets/detection_linking_hela/detection_linking_hela.py
@@ -0,0 +1,121 @@
+import tensorflow as tf
+import tensorflow_datasets as tfds
+
+import numpy as np
+import pandas as pd
+
+_DESCRIPTION = """
+This dataset includes tracking data from DIC-C2DH-HELA (provided by the sixth edition of the Cell Tracking Challenge).
+It consists of two dataframes: ``nodes`` and ``parenthood``. ``nodes`` contains information about the individual
+cells, while "parenthood" includes information on the lineage of the cells.
+"""
+
+_CITATION = """
+@article{pineda2022geometric,
+ title={Geometric deep learning reveals the spatiotemporal fingerprint of microscopic motion},
+ author={Pineda, Jes{\'u}s and Midtvedt, Benjamin and Bachimanchi, Harshith and No{\'e}, Sergio and Midtvedt, Daniel and Volpe, Giovanni and Manzo, Carlo},
+ journal={arXiv preprint arXiv:2202.06355},
+ year={2022}
+}
+"""
+
+
+class DetectionLinkingHela(tfds.core.GeneratorBasedBuilder):
+ """DatasetBuilder for detection_linking_Hela dataset."""
+
+ VERSION = tfds.core.Version("1.0.0")
+ RELEASE_NOTES = {
+ "1.0.0": "Initial release.",
+ }
+
+ def _info(self) -> tfds.core.DatasetInfo:
+ """Returns the dataset metadata."""
+ NODE_FEATURES = self.get_node_features()
+ return tfds.core.DatasetInfo(
+ builder=self,
+ description=_DESCRIPTION,
+ features=tfds.features.FeaturesDict(
+ {
+ "nodes": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=(None,), dtype=NODE_FEATURES[key]
+ )
+ for key in NODE_FEATURES.keys()
+ },
+ }
+ ),
+ "parenthood": tfds.features.FeaturesDict(
+ {
+ "child": tfds.features.Tensor(
+ shape=(None,), dtype=tf.int32
+ ),
+ "parent": tfds.features.Tensor(
+ shape=(None,), dtype=tf.int32
+ ),
+ }
+ ),
+ "images": tfds.features.Tensor(
+ shape=(84, 512, 512, 1), dtype=tf.float64
+ ),
+ "masks": tfds.features.Tensor(
+ shape=(84, 512, 512, 1), dtype=tf.float64
+ ),
+ }
+ ),
+ supervised_keys=None,
+ homepage="https://dataset-homepage/",
+ citation=_CITATION,
+ )
+
+ def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+ """Returns SplitGenerators."""
+ # Downloads the data and defines the splits
+ path = dl_manager.download_and_extract(
+ "https://drive.google.com/u/1/uc?id=1itHz4KmrUqDCKpGNyHUiHE4AFhwiJ5XR&export=download"
+ )
+
+ # Returns the Dict[split names, Iterator[Key, Example]]
+ return {
+ "train": self._generate_examples(
+ path / "detection_linking_hela", "train"
+ ),
+ "test": self._generate_examples(
+ path / "detection_linking_hela", "test"
+ ),
+ }
+
+ def _generate_examples(self, path, split):
+ """Yields examples."""
+
+ # Load data
+ nodes, parenthood, images, masks = (
+ pd.read_csv(path / split / "nodesdf.csv"),
+ pd.read_csv(path / split / "parenthood.csv"),
+ np.load(path / split / "images.npy"),
+ np.load(path / split / "masks.npy"),
+ )
+
+ yield "_", {
+ "nodes": {**nodes.to_dict("list")},
+ "parenthood": {**parenthood.to_dict("list")},
+ "images": images * 1.0,
+ "masks": masks * 1.0,
+ }
+
+ def get_node_features(self):
+ return {
+ "frame": tf.int32,
+ "label": tf.int32,
+ "centroid-0": tf.float32,
+ "centroid-1": tf.float32,
+ "area": tf.float32,
+ "mean_intensity": tf.float32,
+ "perimeter": tf.float32,
+ "eccentricity": tf.float32,
+ "solidity": tf.float32,
+ "set": tf.float32,
+ "parent": tf.int32,
+ "solution": tf.float32,
+ }
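+
+
+# A minimal usage sketch (assumption: importing this module is enough to
+# register the builder with TFDS under the auto-derived name
+# "detection_linking_hela"):
+#
+#     import tensorflow_datasets as tfds
+#     from deeptrack.datasets import detection_linking_hela  # noqa: F401
+#
+#     ds = tfds.load("detection_linking_hela", split="train")
+#     example = next(iter(ds))
+#     nodes = example["nodes"]  # per-cell features, keyed as in get_node_features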
diff --git a/deeptrack/datasets/detection_linking_hela/dummy_data/TODO-add_fake_data_in_this_directory.txt b/deeptrack/datasets/detection_linking_hela/dummy_data/TODO-add_fake_data_in_this_directory.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/deeptrack/datasets/dmdataset/__init__.py b/deeptrack/datasets/dmdataset/__init__.py
new file mode 100644
index 000000000..f84cf036f
--- /dev/null
+++ b/deeptrack/datasets/dmdataset/__init__.py
@@ -0,0 +1,3 @@
+"""dmdataset dataset."""
+
+from .dmdataset import Dmdataset
diff --git a/deeptrack/datasets/dmdataset/checksums.tsv b/deeptrack/datasets/dmdataset/checksums.tsv
new file mode 100644
index 000000000..065db4ead
--- /dev/null
+++ b/deeptrack/datasets/dmdataset/checksums.tsv
@@ -0,0 +1,3 @@
+# TODO(dmdataset): If your dataset downloads files, then the checksums
+# will be automatically added here when running
+# `tfds build --register_checksums`.
diff --git a/deeptrack/datasets/dmdataset/dmdataset.py b/deeptrack/datasets/dmdataset/dmdataset.py
new file mode 100644
index 000000000..2d0ea9543
--- /dev/null
+++ b/deeptrack/datasets/dmdataset/dmdataset.py
@@ -0,0 +1,135 @@
+"""dmdataset dataset."""
+
+import tensorflow_datasets as tfds
+import tensorflow as tf
+
+import os
+import scipy
+
+_DESCRIPTION = """
+"""
+
+_CITATION = """
+"""
+
+
+class Dmdataset(tfds.core.GeneratorBasedBuilder):
+ """DatasetBuilder for dmdataset dataset."""
+
+ VERSION = tfds.core.Version("1.0.0")
+ RELEASE_NOTES = {
+ "1.0.0": "Initial release.",
+ }
+
+ def _info(self) -> tfds.core.DatasetInfo:
+ """Returns the dataset metadata."""
+
+ NODE_FEATURES = self.get_features()
+
+ return tfds.core.DatasetInfo(
+ builder=self,
+ description=_DESCRIPTION,
+ features=tfds.features.FeaturesDict(
+ {
+ "graph": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "graph"
+ ].items()
+ },
+ }
+ ),
+ "labels": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "labels"
+ ].items()
+ },
+ }
+ ),
+ "sets": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "sets"
+ ].items()
+ },
+ }
+ ),
+ }
+ ),
+ supervised_keys=None,
+ homepage="https://dataset-homepage/",
+ citation=_CITATION,
+ )
+
+ def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+ """Returns SplitGenerators."""
+ path = dl_manager.download_and_extract(
+ "https://drive.google.com/u/1/uc?id=19vplN2lbKo4KAmv4NRU2qr3NSlzxFzrx&export=download"
+ )
+
+ return {
+ "train": self._generate_examples(
+ os.path.join(path, "dmdataset", "training")
+ ),
+ "test": self._generate_examples(
+ os.path.join(path, "dmdataset", "validation")
+ ),
+ }
+
+ def _generate_examples(self, path):
+ """Yields examples."""
+ data = [{}, {}, {}]
+ for i, subdict in enumerate(self.get_features().values()):
+ files = (*subdict.keys(),)
+
+ for file in files:
+ data_elem = scipy.sparse.load_npz(
+ os.path.join(path, file + ".npz")
+ ).toarray()
+ data_elem = (
+ data_elem[0] if data_elem.shape[0] == 1 else data_elem
+ )
+
+ data[i][file] = data_elem
+
+ yield "key", {
+ "graph": data[0],
+ "labels": data[1],
+ "sets": data[2],
+ }
+
+ def get_features(self):
+ return {
+ "graph": {
+ "frame": [(None, 1), tf.float64],
+ "node_features": [(None, 3), tf.float64],
+ "edge_features": [(None, 1), tf.float64],
+ "edge_indices": [(None, 2), tf.int64],
+ "edge_dropouts": [(None, 2), tf.float64],
+ },
+ "labels": {
+ "node_labels": [(None,), tf.float64],
+ "edge_labels": [(None,), tf.float64],
+ "global_labels": [(None, 3), tf.float64],
+ },
+ "sets": {
+ "node_sets": [(None, 2), tf.int64],
+ "edge_sets": [(None, 3), tf.int64],
+ },
+ }
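+
+
+# A minimal usage sketch (assumption: the auto-derived TFDS name for the
+# Dmdataset builder is "dmdataset"):
+#
+#     import tensorflow_datasets as tfds
+#     from deeptrack.datasets import dmdataset  # noqa: F401
+#
+#     graph = next(iter(tfds.load("dmdataset", split="train")))["graph"]
+#     # graph["node_features"] has shape (N, 3); graph["edge_indices"] has shape (E, 2)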
diff --git a/deeptrack/datasets/endothelial_vs/__init__.py b/deeptrack/datasets/endothelial_vs/__init__.py
new file mode 100644
index 000000000..7fadae3ac
--- /dev/null
+++ b/deeptrack/datasets/endothelial_vs/__init__.py
@@ -0,0 +1,3 @@
+"""endothelial_vs dataset."""
+
+from .endothelial_vs import EndothelialVs
diff --git a/deeptrack/datasets/endothelial_vs/checksums.tsv b/deeptrack/datasets/endothelial_vs/checksums.tsv
new file mode 100644
index 000000000..ed5533a5e
--- /dev/null
+++ b/deeptrack/datasets/endothelial_vs/checksums.tsv
@@ -0,0 +1 @@
+https://drive.google.com/u/1/uc?id=10gqn0MwuxgyfJVWDZ6a8xduQuGAke4K3&export=download 878669723 7059c1e1245ed9127f207d41efc7d53187d8e4c46d75bea8c90dce630cabe6b4 LiveDeadDataset.zip
diff --git a/deeptrack/datasets/endothelial_vs/dummy_data/TODO-add_fake_data_in_this_directory.txt b/deeptrack/datasets/endothelial_vs/dummy_data/TODO-add_fake_data_in_this_directory.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/deeptrack/datasets/endothelial_vs/endothelial_vs.py b/deeptrack/datasets/endothelial_vs/endothelial_vs.py
new file mode 100644
index 000000000..2a5bb13e0
--- /dev/null
+++ b/deeptrack/datasets/endothelial_vs/endothelial_vs.py
@@ -0,0 +1,82 @@
+"""endothelial_vs dataset."""
+
+import tensorflow_datasets as tfds
+import numpy as np
+
+_DESCRIPTION = """
+"""
+
+_CITATION = """
+@article{korczak2022dynamic,
+ title={Dynamic live/apoptotic cell assay using phase-contrast imaging and deep learning},
+ author={Korczak, Zofia and Pineda, Jesus and Helgadottir, Saga and Midtvedt, Benjamin and Goks{\"o}r, Mattias and Volpe, Giovanni and Adiels, Caroline B},
+ journal={bioRxiv},
+ year={2022},
+ publisher={Cold Spring Harbor Laboratory}
+}
+"""
+
+
+class EndothelialVs(tfds.core.GeneratorBasedBuilder):
+ """DatasetBuilder for endothelial_vs dataset."""
+
+ VERSION = tfds.core.Version("1.0.0")
+ RELEASE_NOTES = {
+ "1.0.0": "Initial release.",
+ }
+
+ def _info(self) -> tfds.core.DatasetInfo:
+ """Returns the dataset metadata."""
+
+ return tfds.core.DatasetInfo(
+ builder=self,
+ description=_DESCRIPTION,
+ features=tfds.features.FeaturesDict(
+ {
+ "image": tfds.features.Image(
+ shape=(None, None, 1), dtype="uint16"
+ ),
+ "label": tfds.features.Image(
+ shape=(None, None, 1), dtype="uint16"
+ ),
+ }
+ ),
+ supervised_keys=("image", "label"),
+ homepage="https://dataset-homepage/",
+ citation=_CITATION,
+ )
+
+ def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+ """Returns SplitGenerators."""
+
+ path = dl_manager.download_and_extract(
+ "https://drive.google.com/u/1/uc?id=10gqn0MwuxgyfJVWDZ6a8xduQuGAke4K3&export=download"
+ )
+
+ return {
+ "train": self._generate_examples(
+ path / "LiveDeadDataset" / "training"
+ ),
+ "test": self._generate_examples(
+ path / "LiveDeadDataset" / "validation"
+ ),
+ }
+
+ def _generate_examples(self, path):
+ """Yields examples."""
+
+ images_path = list(path.glob("*ch00*.tif"))
+
+ for p in images_path:
+ image = tfds.core.lazy_imports.tifffile.imread(p)
+ image = np.expand_dims(image, axis=-1)
+
+ label = tfds.core.lazy_imports.tifffile.imread(
+ str(p).replace("ch00", "ch01")
+ )
+ label = np.expand_dims(label, axis=-1)
+
+ yield p.name, {
+ "image": image,
+ "label": label,
+ }
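+
+
+# A minimal usage sketch (assumption: the builder registers as "endothelial_vs";
+# as_supervised=True is valid because supervised_keys=("image", "label")):
+#
+#     import tensorflow_datasets as tfds
+#     from deeptrack.datasets import endothelial_vs  # noqa: F401
+#
+#     ds = tfds.load("endothelial_vs", split="train", as_supervised=True)
+#     image, label = next(iter(ds))  # uint16 phase-contrast image and target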
diff --git a/deeptrack/datasets/regression_diffusion_landscape/__init__.py b/deeptrack/datasets/regression_diffusion_landscape/__init__.py
new file mode 100644
index 000000000..55c8b4b38
--- /dev/null
+++ b/deeptrack/datasets/regression_diffusion_landscape/__init__.py
@@ -0,0 +1,3 @@
+"""regresion_diffusion_landscape dataset."""
+
+from .regression_diffusion_landscape import RegressionDiffusionLandscape
diff --git a/deeptrack/datasets/regression_diffusion_landscape/checksums.tsv b/deeptrack/datasets/regression_diffusion_landscape/checksums.tsv
new file mode 100644
index 000000000..def432726
--- /dev/null
+++ b/deeptrack/datasets/regression_diffusion_landscape/checksums.tsv
@@ -0,0 +1 @@
+https://drive.google.com/u/1/uc?id=1hiBGuJ0OdcHx6XaNEOqttaw_OmculCXY&export=download 505299704 155269ff2291c4b0e975f939c3e2d719c86098b32893eb9087282e0a0ce0a172 DiffusionLandscapeDataset.zip
diff --git a/deeptrack/datasets/regression_diffusion_landscape/dummy_data/TODO-add_fake_data_in_this_directory.txt b/deeptrack/datasets/regression_diffusion_landscape/dummy_data/TODO-add_fake_data_in_this_directory.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/deeptrack/datasets/regression_diffusion_landscape/regression_diffusion_landscape.py b/deeptrack/datasets/regression_diffusion_landscape/regression_diffusion_landscape.py
new file mode 100644
index 000000000..0e34e74b1
--- /dev/null
+++ b/deeptrack/datasets/regression_diffusion_landscape/regression_diffusion_landscape.py
@@ -0,0 +1,134 @@
+"""dmdataset dataset."""
+
+import tensorflow_datasets as tfds
+import tensorflow as tf
+
+import os
+import scipy
+
+_DESCRIPTION = """
+"""
+
+_CITATION = """
+"""
+
+
+class RegressionDiffusionLandscape(tfds.core.GeneratorBasedBuilder):
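+    """DatasetBuilder for regression_diffusion_landscape dataset."""
+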
+ VERSION = tfds.core.Version("1.0.0")
+ RELEASE_NOTES = {
+ "1.0.0": "Initial release.",
+ }
+
+ def _info(self) -> tfds.core.DatasetInfo:
+ """Returns the dataset metadata."""
+
+ NODE_FEATURES = self.get_features()
+
+ return tfds.core.DatasetInfo(
+ builder=self,
+ description=_DESCRIPTION,
+ features=tfds.features.FeaturesDict(
+ {
+ "graph": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "graph"
+ ].items()
+ },
+ }
+ ),
+ "labels": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "labels"
+ ].items()
+ },
+ }
+ ),
+ "sets": tfds.features.FeaturesDict(
+ {
+ **{
+ key: tfds.features.Tensor(
+ shape=feature[0],
+ dtype=feature[1],
+ )
+ for key, feature in NODE_FEATURES[
+ "sets"
+ ].items()
+ },
+ }
+ ),
+ }
+ ),
+ supervised_keys=None,
+ homepage="https://dataset-homepage/",
+ citation=_CITATION,
+ )
+
+ def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+ """Returns SplitGenerators."""
+ path = dl_manager.download_and_extract(
+ "https://drive.google.com/u/1/uc?id=1hiBGuJ0OdcHx6XaNEOqttaw_OmculCXY&export=download"
+ )
+
+ return {
+ "train": self._generate_examples(
+ os.path.join(path, "DiffusionLandscapeDataset", "training")
+ ),
+ "test": self._generate_examples(
+ os.path.join(path, "DiffusionLandscapeDataset", "validation")
+ ),
+ }
+
+ def _generate_examples(self, path):
+ """Yields examples."""
+ data = [{}, {}, {}]
+ for i, subdict in enumerate(self.get_features().values()):
+ files = (*subdict.keys(),)
+
+ for file in files:
+ data_elem = scipy.sparse.load_npz(
+ os.path.join(path, file + ".npz")
+ ).toarray()
+ data_elem = (
+ data_elem[0] if data_elem.shape[0] == 1 else data_elem
+ )
+
+ data[i][file] = data_elem
+
+ yield "key", {
+ "graph": data[0],
+ "labels": data[1],
+ "sets": data[2],
+ }
+
+ def get_features(self):
+ return {
+ "graph": {
+ "frame": [(None, 1), tf.float64],
+ "node_features": [(None, 3), tf.float64],
+ "edge_features": [(None, 1), tf.float64],
+ "edge_indices": [(None, 2), tf.int64],
+ "edge_dropouts": [(None, 2), tf.float64],
+ },
+ "labels": {
+ "node_labels": [(None,), tf.float64],
+ "edge_labels": [(None,), tf.float64],
+ "global_labels": [(None,), tf.float64],
+ },
+ "sets": {
+ "node_sets": [(None, 2), tf.int64],
+ "edge_sets": [(None, 3), tf.int64],
+ },
+ }
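+
+
+# A minimal usage sketch (assumption: the auto-derived TFDS name is
+# "regression_diffusion_landscape"):
+#
+#     import tensorflow_datasets as tfds
+#     from deeptrack.datasets import regression_diffusion_landscape  # noqa: F401
+#
+#     ds, info = tfds.load("regression_diffusion_landscape", with_info=True)
+#     print(info.features)  # graph / labels / sets feature specification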
diff --git a/deeptrack/models/gans/cgan.py b/deeptrack/models/gans/cgan.py
index 560449a5a..0eb33be68 100644
--- a/deeptrack/models/gans/cgan.py
+++ b/deeptrack/models/gans/cgan.py
@@ -104,8 +104,8 @@ def train_step(self, data):
shape = tf.shape(disc_pred_real)
valid, fake = tf.ones(shape), tf.zeros(shape)
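+        # Keras' Model.compiled_loss expects (y_true, y_pred), so the targets come first.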
d_loss = (
- self.discriminator.compiled_loss(disc_pred_real, valid)
- + self.discriminator.compiled_loss(disc_pred_fake, fake)
+ self.discriminator.compiled_loss(valid, disc_pred_real)
+ + self.discriminator.compiled_loss(fake, disc_pred_fake)
) / 2
# Compute gradient and apply gradient
@@ -124,8 +124,8 @@ def train_step(self, data):
batch_y_copies = [batch_y] * (self.num_losses - 1)
g_loss = self.assemble.compiled_loss(
- [assemble_output[0], *generated_image_copies],
[valid, *batch_y_copies],
+ [assemble_output[0], *generated_image_copies],
)
# Compute gradient and apply gradient
diff --git a/deeptrack/models/gans/pcgan.py b/deeptrack/models/gans/pcgan.py
index e728ffe5e..1ed0f557d 100644
--- a/deeptrack/models/gans/pcgan.py
+++ b/deeptrack/models/gans/pcgan.py
@@ -64,7 +64,7 @@ def __init__(
metrics=[],
**kwargs
):
- super(PCGAN).__init__()
+ super().__init__()
# Build and compile the discriminator
self.discriminator = discriminator
@@ -146,8 +146,8 @@ def train_step(self, data):
shape = tf.shape(disc_pred_1)
valid, fake = tf.ones(shape), tf.zeros(shape)
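+        # compiled_loss follows Keras' (y_true, y_pred) ordering: targets first.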
d_loss = (
- self.discriminator.compiled_loss(disc_pred_1, valid)
- + self.discriminator.compiled_loss(disc_pred_2, fake)
+ self.discriminator.compiled_loss(valid, disc_pred_1)
+ + self.discriminator.compiled_loss(fake, disc_pred_2)
) / 2
# Compute gradient and apply gradient
@@ -168,12 +168,12 @@ def train_step(self, data):
batch_y_copies = [batch_y] * (self.num_losses - 1)
g_loss = self.assemble.compiled_loss(
+ [valid, perceptual_valid, *batch_y_copies],
[
assemble_output[0],
assemble_output[1],
*generated_image_copies,
],
- [valid, perceptual_valid, *batch_y_copies],
)
# Compute gradient and apply gradient
diff --git a/deeptrack/models/gnns/__init__.py b/deeptrack/models/gnns/__init__.py
index 99dd1803f..9dded5dbd 100644
--- a/deeptrack/models/gnns/__init__.py
+++ b/deeptrack/models/gnns/__init__.py
@@ -1,4 +1,5 @@
from .models import *
from .graphs import *
+from .generators import *
from .utils import *
diff --git a/deeptrack/models/gnns/augmentations.py b/deeptrack/models/gnns/augmentations.py
index 06c585278..d23b7c129 100644
--- a/deeptrack/models/gnns/augmentations.py
+++ b/deeptrack/models/gnns/augmentations.py
@@ -37,6 +37,33 @@ def inner(data):
return inner
+def GetSubGraph(num_nodes, node_start):
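+    """
+    Returns a function that takes a graph and returns the subgraph spanning
+    ``num_nodes`` consecutive nodes starting at ``node_start``, keeping only
+    the edges whose endpoints both lie inside that window.
+    """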
+ def inner(data):
+ graph, labels, *_ = data
+
+ edge_connects_removed_node = np.any(
+ (graph[2] < node_start) | (graph[2] >= node_start + num_nodes),
+ axis=-1,
+ )
+
+ node_features = graph[0][node_start : node_start + num_nodes]
+ edge_features = graph[1][~edge_connects_removed_node]
+ edge_connections = graph[2][~edge_connects_removed_node] - node_start
+ weights = graph[3][~edge_connects_removed_node]
+
+ node_labels = labels[0][node_start : node_start + num_nodes]
+ edge_labels = labels[1][~edge_connects_removed_node]
+ global_labels = labels[2]
+
+ return (node_features, edge_features, edge_connections, weights), (
+ node_labels,
+ edge_labels,
+ global_labels,
+ )
+
+ return inner
+
+
def GetSubGraphFromLabel(samples):
"""
Returns a function that takes a graph and returns a subgraph
diff --git a/deeptrack/models/gnns/layers.py b/deeptrack/models/gnns/layers.py
index da03b1f2b..d90d04d71 100644
--- a/deeptrack/models/gnns/layers.py
+++ b/deeptrack/models/gnns/layers.py
@@ -215,7 +215,8 @@ def __init__(
# node update layer
self.update_layer = layers.GRU(filters, time_major=True)
-
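+        # NOTE: the base Layer acts as a pass-through (identity); presumably a
+        # placeholder where a real normalization layer can be swapped in.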
+ self.update_norm = tf.keras.layers.Layer()
+
def update_node_features(self, nodes, aggregated, learnable_embs, edges):
Combined = tf.reshape(
tf.stack([nodes, aggregated], axis=0), (2, -1, nodes.shape[-1])
diff --git a/deeptrack/models/gnns/readme.md b/deeptrack/models/gnns/readme.md
index d2559691a..cbd6f1612 100644
--- a/deeptrack/models/gnns/readme.md
+++ b/deeptrack/models/gnns/readme.md
@@ -12,16 +12,18 @@ MAGIK requires at least python 3.6.
To install MAGIK you must install the [Deeptrack](https://github.com/softmatterlab/DeepTrack-2.0) framework. Open a terminal or command prompt and run:
pip install deeptrack
-
## Software requirements
+
### OS Requirements
+
MAGIK has been tested on the following systems:
-+ macOS: Monterey (12.2.1)
-+ Windows: 10 (64-bit)
+- macOS: Monterey (12.2.1)
+- Windows: 10 (64-bit)
### Python Dependencies
+
```
tensorflow
numpy
@@ -32,20 +34,19 @@ pydata-sphinx-theme
numpydoc
scikit-image
tensorflow-probability
-pydeepimagej
pint
pandas
```
-If you have a very recent version of python, you may need to install numpy _before_ DeepTrack. This is a known issue with scikit-image.
+If you have a very recent version of Python, you may need to install numpy _before_ DeepTrack. This is a known issue with scikit-image.
## It's a kind of MAGIK...
To see MAGIK in action, we provide an [example](//github.com/softmatterlab/DeepTrack-2.0/blob/develop/examples/MAGIK/) based on live-cell migration experiments. Data courtesy of Sergi Masó Orriols, [the QuBI lab](https://mon.uvic.cat/qubilab/).
-
## Cite us!
+
If you use MAGIK in your project, please cite our article:
```
diff --git a/deeptrack/models/gnns/utils.py b/deeptrack/models/gnns/utils.py
index 4865d1acf..174336eac 100644
--- a/deeptrack/models/gnns/utils.py
+++ b/deeptrack/models/gnns/utils.py
@@ -87,10 +87,9 @@ def to_trajectories(
return trajectories
-def get_predictions(dfs, properties, model, graph_kwargs):
+def get_predictions(dfs, properties, model, **graph_kwargs):
"""
Get predictions from nodes dataframe.
-
Parameters
----------
dfs: DataFrame
@@ -103,7 +102,7 @@ def get_predictions(dfs, properties, model, graph_kwargs):
Extra arguments to be passed to the graph extractor.
"""
grapht = GraphExtractor(
- nodesdf=dfs, properties=properties, validation=True, **graph_kwargs.properties()
+ nodesdf=dfs, properties=properties, validation=True, **graph_kwargs
)
v = [
diff --git a/deeptrack/models/layers.py b/deeptrack/models/layers.py
index 471d5a525..2887c70c6 100644
--- a/deeptrack/models/layers.py
+++ b/deeptrack/models/layers.py
@@ -58,6 +58,7 @@ def ConvolutionalBlock(
strides=1,
normalization=False,
norm_kwargs={},
+ activation_first=True,
**kwargs,
):
"""A single 2d convolutional layer.
@@ -92,7 +93,7 @@ def Layer(filters, **kwargs_inner):
**kwargs_inner,
)
return lambda x: single_layer_call(
- x, layer, activation, normalization, norm_kwargs
+ x, layer, activation, normalization, norm_kwargs, activation_first
)
return Layer
@@ -183,6 +184,7 @@ def DeconvolutionalBlock(
strides=2,
normalization=False,
norm_kwargs={},
+ activation_first=True,
**kwargs,
):
"""A single 2d deconvolutional layer.
@@ -217,7 +219,7 @@ def Layer(filters, **kwargs_inner):
**kwargs_inner,
)
return lambda x: single_layer_call(
- x, layer, activation, normalization, norm_kwargs
+ x, layer, activation, normalization, norm_kwargs, activation_first
)
return Layer
@@ -234,6 +236,7 @@ def StaticUpsampleBlock(
padding="same",
with_conv=True,
norm_kwargs={},
+ activation_first=True,
**kwargs,
):
"""A single no-trainable 2d deconvolutional layer.
@@ -270,7 +273,12 @@ def call(x):
y = layer(x)
if with_conv:
return single_layer_call(
- y, conv, activation, normalization, norm_kwargs
+ y,
+ conv,
+ activation,
+ normalization,
+ norm_kwargs,
+ activation_first,
)
else:
return layer(x)
@@ -287,6 +295,7 @@ def ResidualBlock(
strides=1,
normalization="BatchNormalization",
norm_kwargs={},
+ activation_first=True,
**kwargs,
):
"""A 2d residual layer with two convolutional steps.
@@ -326,7 +335,12 @@ def Layer(filters, **kwargs_inner):
def call(x):
y = single_layer_call(
- x, conv, activation, normalization, norm_kwargs
+ x,
+ conv,
+ activation,
+ normalization,
+ norm_kwargs,
+ activation_first,
)
y = single_layer_call(y, conv2, None, normalization, norm_kwargs)
y = layers.Add()([identity(x), y])
@@ -803,7 +817,6 @@ def __init__(
else:
self.FwdMlpLayer = fwd_mlp_layer(**fwd_mlp_kwargs)
-
self.norm_0, self.norm_1 = (
as_normalization(normalization)(**norm_kwargs),
as_normalization(normalization)(**norm_kwargs),
diff --git a/deeptrack/models/utils.py b/deeptrack/models/utils.py
index 054277f09..2d01c8eba 100644
--- a/deeptrack/models/utils.py
+++ b/deeptrack/models/utils.py
@@ -247,57 +247,57 @@ def fit(self, x, *args, batch_size=32, generator_kwargs={}, **kwargs):
return self.model.fit(x, *args, batch_size=batch_size, **kwargs)
- def export(
- self,
- path,
- minimum_size,
- preprocessing=None,
- dij_config=None,
- ):
- """Export model unto the BioImage Model Zoo format for use with Fiji and ImageJ.
-
- Uses pyDeepImageJ by E. Gómez-de-Mariscal, C. García-López-de-Haro, L. Donati, M. Unser,
- A. Muñoz-Barrutia and D. Sage for exporting.
-
- DeepImageJ, used for loading the models into ImageJ, is only compatible with
- tensorflow==2.2.1. Models using newer features may not load correctly.
-
- Pre-processing of the data should be defined when creating the model using the preprocess
- keyword. Post-processing should be left to other imageJ functionality. If this is not
- sufficient, see `https://github.com/deepimagej/pydeepimagej` for what to pass to the
- preprocessing and postprocessing arguments.
-
- Parameters
- ----------
- path : str
- Path to store exported files.
- minimum_size : int
- For models where the input size is not fixed (e.g. (None, None 1)), the input
- is required to be a multiple of this value.
- preprocessing : Feature or Layer
- Additional preprocessing. Will be saved as a part of the network, and as
- such need to be compatible with tensorflow tensor operations. Assumed to have the
- same input shape as the first layer of the network.
- dij_config : BioImageModelZooConfig, optional
- Configuration used for deployment. See `https://github.com/deepimagej/pydeepimagej` for
- list of options. If None, a basic config is created for you.
- """
- from pydeepimagej.yaml import BioImageModelZooConfig
-
- # TODO: Does not yet fully work as intended. Debugging proved to be hard.
- inp = layers.Input(shape=self.model.layers[0].input_shape)
- model = self.model
-
- if preprocessing:
- processed_inp = preprocessing(inp)
- model = model(processed_inp)
- model = models.Model(inp, model)
-
- dij_config = BioImageModelZooConfig(model, minimum_size)
- dij_config.Name = "DeepTrack 2.1 model"
-
- dij_config.add_weights_formats(model, "Tensorflow", authors=dij_config.Authors)
- dij_config.export_model(path)
+ # def export(
+ # self,
+ # path,
+ # minimum_size,
+ # preprocessing=None,
+ # dij_config=None,
+ # ):
+ # """Export model unto the BioImage Model Zoo format for use with Fiji and ImageJ.
+
+ # Uses pyDeepImageJ by E. Gómez-de-Mariscal, C. García-López-de-Haro, L. Donati, M. Unser,
+ # A. Muñoz-Barrutia and D. Sage for exporting.
+
+ # DeepImageJ, used for loading the models into ImageJ, is only compatible with
+ # tensorflow==2.2.1. Models using newer features may not load correctly.
+
+ # Pre-processing of the data should be defined when creating the model using the preprocess
+ # keyword. Post-processing should be left to other imageJ functionality. If this is not
+ # sufficient, see `https://github.com/deepimagej/pydeepimagej` for what to pass to the
+ # preprocessing and postprocessing arguments.
+
+ # Parameters
+ # ----------
+ # path : str
+ # Path to store exported files.
+ # minimum_size : int
+ # For models where the input size is not fixed (e.g. (None, None 1)), the input
+ # is required to be a multiple of this value.
+ # preprocessing : Feature or Layer
+ # Additional preprocessing. Will be saved as a part of the network, and as
+ # such need to be compatible with tensorflow tensor operations. Assumed to have the
+ # same input shape as the first layer of the network.
+ # dij_config : BioImageModelZooConfig, optional
+ # Configuration used for deployment. See `https://github.com/deepimagej/pydeepimagej` for
+ # list of options. If None, a basic config is created for you.
+ # """
+ # from pydeepimagej.yaml import BioImageModelZooConfig
+
+ # # TODO: Does not yet fully work as intended. Debugging proved to be hard.
+ # inp = layers.Input(shape=self.model.layers[0].input_shape)
+ # model = self.model
+
+ # if preprocessing:
+ # processed_inp = preprocessing(inp)
+ # model = model(processed_inp)
+ # model = models.Model(inp, model)
+
+ # dij_config = BioImageModelZooConfig(model, minimum_size)
+ # dij_config.Name = "DeepTrack 2.1 model"
+
+ # dij_config.add_weights_formats(model, "Tensorflow", authors=dij_config.Authors)
+ # dij_config.export_model(path)
def get(self, image, add_batch_dimension_on_resolve, **kwargs):
if add_batch_dimension_on_resolve:
diff --git a/deeptrack/scatterers.py b/deeptrack/scatterers.py
index a899a253a..341eb0e58 100644
--- a/deeptrack/scatterers.py
+++ b/deeptrack/scatterers.py
@@ -748,6 +748,7 @@ def get(
]
mask = np.exp(-0.5 * (x**2 + y**2) / ((sigma) ** 2))
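+        # Keep the mask on the same array backend as the field (CuPy when on GPU).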
+ mask = image.maybe_cupy(mask)
arr = arr * mask
fourier_field = np.fft.fft2(arr)
diff --git a/deeptrack/test/test_layers.py b/deeptrack/test/test_layers.py
index b74859672..d06c0b715 100644
--- a/deeptrack/test/test_layers.py
+++ b/deeptrack/test/test_layers.py
@@ -282,6 +282,20 @@ def test_Masked_FGNN_layer(self):
),
)
self.assertTrue(model.layers[-1], layers.MaskedFGNN)
+
+ def test_GRUMPN_layer(self):
+ block = layers.GRUMPNLayer()
+ model = makeMinimalModel(
+ block(96),
+ input_layer=(
+ k_layers.Input(shape=(None, 96)),
+ k_layers.Input(shape=(None, 10)),
+ k_layers.Input(shape=(None, 2), dtype=tf.int32),
+ k_layers.Input(shape=(None, 1)),
+ k_layers.Input(shape=(None, 2)),
+ ),
+ )
+        self.assertIsInstance(model.layers[-1], layers.GRUMPN)
def test_GraphTransformer(self):
block = layers.GraphTransformerLayer()
diff --git a/deeptrack/test/test_scatterers.py b/deeptrack/test/test_scatterers.py
index 6d09c278b..c830f9ff9 100644
--- a/deeptrack/test/test_scatterers.py
+++ b/deeptrack/test/test_scatterers.py
@@ -294,6 +294,28 @@ def test_MieSphere(self):
imaged_scatterer_1.update().resolve()
+ def test_MieSphere_Coherence_length(self):
+ optics_1 = Brightfield(
+ NA=0.15,
+ wavelength=633e-9,
+ resolution=2e-6,
+ magnification=1,
+ output_region=(0, 0, 256, 256),
+ return_field=True,
+ )
+
+ scatterer = scatterers.MieSphere(
+ position=(128, 128),
+ radius=3e-6,
+ refractive_index=1.45 + 0.1j,
+ z=2612 * 1e-6,
+ coherence_length=5.9e-05,
+ )
+
+ imaged_scatterer_1 = optics_1(scatterer)
+
+ imaged_scatterer_1.update().resolve()
+
def test_MieStratifiedSphere(self):
optics_1 = Brightfield(
NA=0.7,
@@ -324,4 +346,4 @@ def test_MieStratifiedSphere(self):
if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/requirements.txt b/requirements.txt
index 97f7ab0ac..fc5f21dfe 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,6 @@ numpydoc
scikit-image
tensorflow-probability
tensorflow-datasets
-pydeepimagej
more_itertools
pint<0.20
pandas
diff --git a/setup.py b/setup.py
index f0b657b6a..7de8fe798 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
setuptools.setup(
name="deeptrack", # Replace with your own username
- version="1.5.0a5",
+ version="1.5.2",
author="Benjamin Midtvedt",
author_email="benjamin.midtvedt@physics.gu.se",
description="A deep learning oriented microscopy image simulation package",