Merged
Changes from all commits (33 commits)
eecd48d
Merge a8c66ebb98565c20f576bbf5b2fda4c8a57eec44 into 8a58cc19e838d683b…
BenjaminMidtvedt Jul 26, 2022
51c87f1
chore: autopublish 2022-07-26T13:54:44Z
github-actions[bot] Jul 26, 2022
b30c265
Remove create-badges job
BenjaminMidtvedt Jul 26, 2022
085b0e5
Delete test.py
BenjaminMidtvedt Jul 26, 2022
74b37b4
Add multi-head masked attention
JesusPinedaC Jul 26, 2022
b4debf5
Update multi-head gated attention to match parent layer
JesusPinedaC Jul 26, 2022
3ecb423
Update documentation
JesusPinedaC Jul 26, 2022
bbf7f4b
Test multi-head masked attention
JesusPinedaC Jul 27, 2022
5df728a
allow gated attention layers to use bias
JesusPinedaC Jul 27, 2022
337152e
test bias in gated attention layers
JesusPinedaC Jul 27, 2022
4eb3e10
set return_attention_weights to False to avoid multi-outputs
JesusPinedaC Jul 27, 2022
2affe63
reformat gnns/layers.py
JesusPinedaC Jul 29, 2022
26d064c
Update layers.py
JesusPinedaC Jul 29, 2022
c65f49d
Update test_layers.py
JesusPinedaC Jul 29, 2022
2ca0590
Update models.py
JesusPinedaC Jul 30, 2022
1d99b6b
Update test_models.py
JesusPinedaC Jul 30, 2022
49cf7ed
Update test_models.py
JesusPinedaC Jul 30, 2022
f455fa1
Merge pull request #129 from softmatterlab/jp/rev-MAGIK
BenjaminMidtvedt Aug 1, 2022
e73d5ed
Fix indexing problems related to tf.gather
JesusPinedaC Aug 2, 2022
71e67ff
Merge pull request #130 from softmatterlab/jp/fix-idx-gather
BenjaminMidtvedt Aug 3, 2022
790e8e2
Allow multi-inputs in ContinuousGenerator
JesusPinedaC Aug 15, 2022
d4e17c1
Fix bad conversion to integer
BenjaminMidtvedt Aug 22, 2022
ad2a95e
version bump
BenjaminMidtvedt Aug 22, 2022
fffe6b6
Merge pull request #131 from softmatterlab/jp/multi-input-generator
BenjaminMidtvedt Aug 22, 2022
af29362
Fix phase correction at focus and offset calculation
BenjaminMidtvedt Aug 26, 2022
d055d1a
Fix phase correction in propagation
BenjaminMidtvedt Aug 26, 2022
30d1489
Merge pull request #134 from softmatterlab/bm/fix-mie-phase
BenjaminMidtvedt Aug 26, 2022
277db0b
Fix Mie phase out of focus
BenjaminMidtvedt Sep 1, 2022
a6f4ba0
Fix Mie phase out of focus
BenjaminMidtvedt Sep 9, 2022
39e4c6d
Merge branch 'develop' of https://github.com/softmatterlab/DeepTrack-…
BenjaminMidtvedt Sep 9, 2022
4806835
Update README.md
giovannivolpe Sep 22, 2022
ff2aca1
Bm/version 1.4.0 (#137)
giovannivolpe Oct 4, 2022
d68fca4
Merge branch 'develop'
BenjaminMidtvedt Oct 4, 2022
16 changes: 1 addition & 15 deletions .github/workflows/python-app.yml
@@ -10,21 +10,7 @@ on:
    branches: [ develop, master ]

jobs:
  create-badges:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
          fetch-depth: 0 # otherwise, you will fail to push refs to the dest repo
      - name: Colab Badge Action
        uses: trsvchn/colab-badge-action@v4
      - name: Commit & Push changes
        uses: actions-js/push@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          branch: ${{ github.head_ref }}


  build:

    strategy:
10 changes: 7 additions & 3 deletions .gitignore
@@ -1,13 +1,12 @@
**/.ipynb_checkpoints
**/__pycache__
deeptrack-app/*
*/datasets/*

paper-examples/models/*

build/*
dist/*
*.egg-info/
*/datasets/*
*/theory
_src/build/**/*

@@ -16,6 +15,11 @@ ParticleSizing
CellData
ParticleTracking
data/
datasets/
examples/**/*/models/
**/node_modules/

*.tif
*.png
*.jpg
*.jpeg
*.npy
2 changes: 1 addition & 1 deletion README.md
@@ -111,7 +111,7 @@ The second series focuses on individual topics, introducing them in a natural or

Additionally, we have seven more case studies which are less documented, but give additional insight into how to use DeepTrack with real datasets:

1. [MNIST](examples/paper-examples/1_MNIST.ipynb) classifies handwritten digits.
1. [MNIST](examples/paper-examples/1-MNIST.ipynb) classifies handwritten digits.
2. [single particle tracking](examples/paper-examples/2-single_particle_tracking.ipynb) tracks experimentally captured videos of a single particle. (Requires opencv-python compiled with ffmpeg to open and read a video.)
3. [single particle sizing](examples/paper-examples/3-particle_sizing.ipynb) extracts the radius and refractive index of particles.
4. [multi-particle tracking](examples/paper-examples/4-multi-molecule-tracking.ipynb) detects quantum dots in a low SNR image.
3 changes: 3 additions & 0 deletions deeptrack/__init__.py
@@ -2,6 +2,7 @@
from pint import UnitRegistry, Context
from .backend.pint_definition import pint_definitions


units = UnitRegistry(pint_definitions.split("\n"))

import tensorflow as tf
@@ -27,6 +28,7 @@
from .statistics import *
from .holography import *


from .image import strip

from . import (
@@ -39,4 +41,5 @@
    backend,
    test,
    visualization,
    datasets,
)
24 changes: 24 additions & 0 deletions deeptrack/augmentations.py
@@ -681,6 +681,30 @@ def image_to_crop(image):
)


class CropTight(Feature):
    def __init__(self, eps=1e-10, **kwargs):
        """Crops the input array to remove empty space.

        Removes indices from the start and end of the array where all values are below eps.

        Currently only works for 3D arrays.

        Parameters
        ----------
        eps : float, optional
            The threshold for considering a pixel to be empty, by default 1e-10
        """
        super().__init__(eps=eps, **kwargs)

    def get(self, image, eps, **kwargs):
        image = np.asarray(image)

        image = image[..., np.any(image > eps, axis=(0, 1))]
        image = image[np.any(image > eps, axis=(1, 2)), ...]
        image = image[:, np.any(image > eps, axis=(0, 2)), :]

        return image


class Pad(Augmentation):
    """Pads the image.

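The new CropTight feature above trims leading and trailing slices that are entirely below eps along each axis of a 3D array. A minimal usage sketch, assuming CropTight is exported at the package level like the other augmentations; the toy volume and printed shape are illustrative, not from this PR:

import numpy as np
import deeptrack as dt

# Toy 3D volume: zero everywhere except a 16x16x16 block of ones.
volume = np.zeros((32, 32, 32))
volume[8:24, 8:24, 8:24] = 1.0

# CropTight keeps only slices containing at least one value above eps,
# checking axis 2 first, then axis 0, then axis 1.
crop = dt.CropTight(eps=1e-10)
cropped = crop.resolve(volume)
print(cropped.shape)  # expected: (16, 16, 16)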
6 changes: 6 additions & 0 deletions deeptrack/datasets/__init__.py
@@ -0,0 +1,6 @@
from deeptrack.datasets import (
    detection_QuantumDots,
    segmentation_ssTEM_drosophila,
    regression_holography_nanoparticles,
    segmentation_fluorescence_u2os,
)
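For orientation, a sketch of how these builders are typically consumed once registered with tensorflow_datasets; the snake_case dataset name is inferred from the usual tfds class-name convention, not stated in this PR:

import tensorflow_datasets as tfds
import deeptrack.datasets  # importing the subpackage registers the builders with tfds

# tfds derives "detection_quantumdots" from the DetectionQuantumdots class name.
ds = tfds.load("detection_quantumdots", split="train")
for example in ds.take(1):
    image = example["image"]  # uint16 tensor of shape (1200, 1200, 1)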
3 changes: 3 additions & 0 deletions deeptrack/datasets/detection_QuantumDots/__init__.py
@@ -0,0 +1,3 @@
"""detection_QuantumDots dataset."""

from .detection_QuantumDots import DetectionQuantumdots
2 changes: 2 additions & 0 deletions deeptrack/datasets/detection_QuantumDots/checksums.tsv
@@ -0,0 +1,2 @@
https://drive.google.com/file/d/1naaoxIaAU1F_rBaI-I1pB1K4Sp6pq_Jv/view?usp=sharing 67850 95f52b3bbfbf1b2fe7f213021fbd63bdf5040a4dc099ef6903243feb849f06c6 view
https://drive.google.com/u/1/uc?id=1naaoxIaAU1F_rBaI-I1pB1K4Sp6pq_Jv&export=download 543855765 375476e7a70fa3c1a8f91f2e4035b896b8d8acb1800dae2e1e028c2db485a030 QuantumDots.zip
24 changes: 24 additions & 0 deletions deeptrack/datasets/detection_QuantumDots/detection_QuantumDots_test.py
@@ -0,0 +1,24 @@
"""detection_QuantumDots dataset."""

import tensorflow_datasets as tfds
from . import detection_QuantumDots


class DetectionQuantumdotsTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for detection_QuantumDots dataset."""
# TODO(detection_QuantumDots):
DATASET_CLASS = detection_QuantumDots.DetectionQuantumdots
SPLITS = {
'train': 3, # Number of fake train example
'test': 1, # Number of fake test example
}

# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}


if __name__ == '__main__':
tfds.testing.test_main()
69 changes: 69 additions & 0 deletions deeptrack/datasets/detection_QuantumDots/detection_QuantumDots.py
@@ -0,0 +1,69 @@
"""detection_QuantumDots dataset."""

import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np

# TODO(detection_QuantumDots): Markdown description that will appear on the catalog page.
_DESCRIPTION = """
Sequential images of quantum dots in a fluorescent microscope. The dataset is unlabeled.
"""

# TODO(detection_QuantumDots): BibTeX citation
_CITATION = """
"""


class DetectionQuantumdots(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for detection_QuantumDots dataset."""

VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial release.",
}

def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
# TODO(detection_QuantumDots): Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(
{
# These are the features of your dataset like images, labels ...
"image": tfds.features.Image(
shape=(1200, 1200, 1),
dtype=tf.uint16,
),
}
),
# If there's a common (input, target) tuple from the
# features, specify them here. They'll be used if
# `as_supervised=True` in `builder.as_dataset`.
supervised_keys=None, # Set to `None` to disable
homepage="https://dataset-homepage/",
citation=_CITATION,
)

def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
# TODO(detection_QuantumDots): Downloads the data and defines the splits
path = dl_manager.download_and_extract(
"https://drive.google.com/u/1/uc?id=1naaoxIaAU1F_rBaI-I1pB1K4Sp6pq_Jv&export=download"
)

# TODO(detection_QuantumDots): Returns the Dict[split names, Iterator[Key, Example]]
return {
"train": self._generate_examples(path / "QuantumDots"),
}

def _generate_examples(self, path):
"""Yields examples."""
tifpath = path / "Qdots.tif"

image_stack = tfds.core.lazy_imports.tifffile.imread(tifpath)
image_stack = np.expand_dims(image_stack, axis=-1)
for i, image in enumerate(image_stack):
yield str(i), {
"image": image,
}
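The builder can also be exercised directly through the standard tfds DatasetBuilder API; a minimal sketch rather than a documented entry point of this PR:

from deeptrack.datasets.detection_QuantumDots import DetectionQuantumdots

builder = DetectionQuantumdots()
builder.download_and_prepare()  # downloads and extracts QuantumDots.zip on first run
ds = builder.as_dataset(split="train")  # tf.data.Dataset of {"image": ...} examples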
3 changes: 3 additions & 0 deletions deeptrack/datasets/regression_holography_nanoparticles/__init__.py
@@ -0,0 +1,3 @@
"""regression_holography_nanoparticles dataset."""

from .regression_holography_nanoparticles import RegressionHolographyNanoparticles
3 changes: 3 additions & 0 deletions deeptrack/datasets/regression_holography_nanoparticles/checksums.tsv
@@ -0,0 +1,3 @@
# TODO(regression_holography_nanoparticles): If your dataset downloads files, then the checksums
# will be automatically added here when running
# `tfds build --register_checksums`.
83 changes: 83 additions & 0 deletions deeptrack/datasets/regression_holography_nanoparticles/regression_holography_nanoparticles.py
@@ -0,0 +1,83 @@
"""regression_holography_nanoparticles dataset."""

import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np

# TODO(regression_holography_nanoparticles): Markdown description that will appear on the catalog page.
_DESCRIPTION = """
"""

# TODO(regression_holography_nanoparticles): BibTeX citation
_CITATION = """
"""


class RegressionHolographyNanoparticles(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for regression_holography_nanoparticles dataset."""

VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial release.",
}

def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
# TODO(regression_holography_nanoparticles): Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(
{
# These are the features of your dataset like images, labels ...
"image": tfds.features.Tensor(shape=(64, 64, 2), dtype=tf.float64),
"radius": tfds.features.Scalar(tf.float64),
"refractive_index": tfds.features.Scalar(tf.float64),
}
),
# If there's a common (input, target) tuple from the
# features, specify them here. They'll be used if
# `as_supervised=True` in `builder.as_dataset`.
supervised_keys=(
"image",
"radius",
"refractive_index",
), # Set to `None` to disable
homepage="https://dataset-homepage/",
citation=_CITATION,
)

def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
# TODO(regression_holography_nanoparticles): Downloads the data and defines the splits
path = dl_manager.download_and_extract(
"https://drive.google.com/u/1/uc?id=1LJqWYmLj93WYLKaLm_yQFmiR1FZHhf1r&export=download"
)

# TODO(regression_holography_nanoparticles): Returns the Dict[split names, Iterator[Key, Example]]
return {
"train": self._generate_examples(path, "train"),
"test": self._generate_examples(path, "test"),
}

def _generate_examples(self, path, split):
"""Yields examples."""
# TODO(regression_holography_nanoparticles): Yields (key, example) tuples from the dataset

if split == "train":
data = np.load(path / "training_set.npy")
radius = np.load(path / "training_radius.npy")
refractive_index = np.load(path / "training_n.npy")
elif split == "test":
data = np.load(path / "validation_set.npy")
radius = np.load(path / "validation_radius.npy")
refractive_index = np.load(path / "validation_n.npy")
else:
raise ValueError("Split not recognized:", split)

for idx in range(data.shape[0]):
yield str(idx), {
"image": data[idx],
"radius": radius[idx],
"refractive_index": refractive_index[idx],
}
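Since supervised_keys lists three features, as_supervised=True should yield (image, radius, refractive_index) tuples; a brief sketch, assuming the tfds-derived dataset name and a tfds version recent enough to accept more than a two-element supervised_keys:

import tensorflow_datasets as tfds
import deeptrack.datasets  # registers the builder

ds = tfds.load(
    "regression_holography_nanoparticles",
    split="train",
    as_supervised=True,
)
for image, radius, refractive_index in ds.take(1):
    print(image.shape, float(radius), float(refractive_index))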
24 changes: 24 additions & 0 deletions deeptrack/datasets/regression_holography_nanoparticles/regression_holography_nanoparticles_test.py
@@ -0,0 +1,24 @@
"""regression_holography_nanoparticles dataset."""

import tensorflow_datasets as tfds
from . import regression_holography_nanoparticles


class RegressionHolographyNanoparticlesTest(tfds.testing.DatasetBuilderTestCase):
"""Tests for regression_holography_nanoparticles dataset."""
# TODO(regression_holography_nanoparticles):
DATASET_CLASS = regression_holography_nanoparticles.RegressionHolographyNanoparticles
SPLITS = {
'train': 3, # Number of fake train example
'test': 1, # Number of fake test example
}

# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}


if __name__ == '__main__':
tfds.testing.test_main()
3 changes: 3 additions & 0 deletions deeptrack/datasets/segmentation_fluorescence_u2os/__init__.py
@@ -0,0 +1,3 @@
"""segmentation_fluorescence_u2os dataset."""

from .segmentation_fluorescence_u2os import SegmentationFluorescenceU2os
3 changes: 3 additions & 0 deletions deeptrack/datasets/segmentation_fluorescence_u2os/checksums.tsv
@@ -0,0 +1,3 @@
# TODO(segmentation_fluorescence_u2os): If your dataset downloads files, then the checksums
# will be automatically added here when running
# `tfds build --register_checksums`.