Use bioimageio.core.build_spec to generate stardist export #171

Merged
41 commits merged on Mar 2, 2022
Changes from 9 commits
Commits (41)
bcadb48
Use bioimageio.core.build_spec to generate stardist export
constantinpape Nov 3, 2021
77395ff
Add bioimageio.core to requirements
constantinpape Nov 3, 2021
d919e5d
Update bioimageio export
constantinpape Nov 4, 2021
3536ae8
Make bioimageio.core optional dependency
constantinpape Nov 4, 2021
687b373
Fix small errors
constantinpape Nov 4, 2021
3513897
Concatenate output tensor for tf weights
constantinpape Nov 8, 2021
718a7b0
Hard-code correct output name for tf bioimageio export
constantinpape Nov 8, 2021
46a541e
Use tf model for prediction in bioimageio-utils
constantinpape Nov 9, 2021
da8d86f
First working version of bioimageio export
constantinpape Nov 11, 2021
deaeb46
Refactor imports
uschmidt83 Nov 15, 2021
bd581cb
Fix problem with running bioimageio export multiple times
uschmidt83 Nov 15, 2021
1ae3957
Make it work for tensorflow 1 & 2 (hopefully)
uschmidt83 Nov 16, 2021
75e6c46
Update test config (WIP)
uschmidt83 Nov 16, 2021
66ffcdd
Try to fix test config
uschmidt83 Nov 16, 2021
c2fb703
Test 3D model export; tweak code
uschmidt83 Nov 16, 2021
c828fa7
Disable bioimageio tests for tensorflow 2.x
uschmidt83 Nov 16, 2021
22990d1
Github actions syntax error (again...)
uschmidt83 Nov 16, 2021
0178582
Use csbdeep.move_image_axes WIP
constantinpape Nov 22, 2021
06a3d66
Revert "Use csbdeep.move_image_axes WIP"
uschmidt83 Nov 22, 2021
c6da276
Major refactoring
uschmidt83 Nov 22, 2021
d4bab08
Fix output shape spec WIP
constantinpape Nov 23, 2021
4cccfd6
Support inference for bioimage.io models; add example
uschmidt83 Nov 23, 2021
ef3e48f
Add halo WIP
constantinpape Nov 25, 2021
86ac602
Adapt to changes in bioimageio.build_model and some cosmetic changes
constantinpape Jan 12, 2022
04d849f
Add macro for stardist postprocessing in deepIJ to modelzoo export
constantinpape Jan 12, 2022
c3af5c8
Fix issues with postprocessing macro
constantinpape Jan 14, 2022
3cb0143
Rename postprocessing macro and add missing command to it
constantinpape Jan 22, 2022
d7eae81
Fix how the macro file is added to package data
constantinpape Jan 25, 2022
8ed9ae6
Remove ijm file
uschmidt83 Feb 4, 2022
7a2f686
Refactoring and fixes
uschmidt83 Feb 4, 2022
b39fb7d
Export StarDist/Keras weights separately
uschmidt83 Feb 4, 2022
1546daf
Add import_bioimageio function, remove BioimageioModel class
uschmidt83 Feb 5, 2022
6f08a9c
Update halo computation
uschmidt83 Feb 6, 2022
17ffe4f
Replace mutable default value with None
uschmidt83 Feb 7, 2022
c120153
Update notebook
uschmidt83 Feb 7, 2022
58ca216
Only create bioimage.io zip file during export
uschmidt83 Feb 7, 2022
95a1200
Update bioimageio export function to be compatible with new bioimagei…
constantinpape Feb 28, 2022
7d7e4f8
Merge pull request #2 from constantinpape/modelzoo-core-v05
constantinpape Feb 28, 2022
db86a7a
Update tags
uschmidt83 Mar 1, 2022
6ab6d5f
Adjust input_min_shape if necessary
uschmidt83 Mar 1, 2022
797744e
Update tests and tags
uschmidt83 Mar 2, 2022
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -51,6 +51,6 @@ jobs:

- name: Install package (Linux and Windows)
if: startsWith(matrix.os, 'macos') == false
run: pip install ".[test]"
run: pip install ".[test,bioimageio]"

- run: pytest -v --durations=50 -m "not gpu"
1 change: 1 addition & 0 deletions setup.py
@@ -114,6 +114,7 @@ def build_extension(self, ext):
extras_require={
"tf1": ["csbdeep[tf1]>=0.6.3"],
"test": ["pytest"],
"bioimageio": ["bioimageio.core"],
},

)
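With this extra in place, the optional bioimage.io support can be installed together with the package, mirroring the CI change above: pip install ".[test,bioimageio]" from a repository checkout, or, assuming the extra ships with a release, pip install "stardist[bioimageio]".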
1 change: 1 addition & 0 deletions stardist/__init__.py
@@ -18,3 +18,4 @@ def format_warning(message, category, filename, lineno, line=''):
from .plot.render import render_label, render_label_pred
from .rays3d import rays_from_json, Rays_Cartesian, Rays_SubDivide, Rays_Tetra, Rays_Octo, Rays_GoldenSpiral, Rays_Explicit
from .sample_patches import sample_patches
from .bioimageio_utils import export_bioimageio
277 changes: 277 additions & 0 deletions stardist/bioimageio_utils.py
@@ -0,0 +1,277 @@
from pathlib import Path
from pkg_resources import get_distribution
from importlib_metadata import metadata
from itertools import chain
from zipfile import ZipFile

import numpy as np
import tensorflow as tf
from csbdeep.utils import axes_check_and_normalize, normalize

try:
from bioimageio.core.build_spec import build_model
except ImportError:
build_model = None


def _create_stardist_dependencies(outdir):
pkg_info = get_distribution("stardist")
reqs = ("tensorflow", ) + tuple(map(str, pkg_info.requires()))
path = outdir / "requirements.txt"
with open(path, "w") as f:
f.write("\n".join(reqs))
return f"pip:{path}"


def _create_stardist_doc(outdir):
doc_path = outdir / "README.md"
text = (
"# StarDist Model\n"
"This is a model for instance segmentation of starconvex objects with stardist.\n"
"For details please check out the [stardist repo](https://github.com/stardist/stardist)."
)
with open(doc_path, "w") as f:
f.write(text)
return doc_path


def _get_stardist_metadata(outdir):
package_data = metadata("stardist")
doi_2d = "https://doi.org/10.1007/978-3-030-00934-2_30"
doi_3d = "https://doi.org/10.1109/WACV45572.2020.9093435"
data = dict(
description=package_data["Summary"],
authors=list(dict(name=name.strip()) for name in package_data["Author"].split(",")),
git_repo=package_data["Home-Page"],
license=package_data["License"],
dependencies=_create_stardist_dependencies(outdir),
cite={"Cell Detection with Star-Convex Polygons": doi_2d,
"Star-convex Polyhedra for 3D Object Detection and Segmentation in Microscopy": doi_3d},
tags=["stardist", "segmentation", "instance segmentation", "tensorflow"],
covers=["https://raw.githubusercontent.com/stardist/stardist/master/images/stardist_logo.jpg"],
documentation=_create_stardist_doc(outdir)
)
return data


# TODO factor this out (it's the same as in csbdeep.base_model)
def _get_weights_name(model, prefer="best"):
# get all weight files and sort by modification time descending (newest first)
weights_ext = ("*.h5", "*.hdf5")
weights_files = chain(*(model.logdir.glob(ext) for ext in weights_ext))
weights_files = reversed(sorted(weights_files, key=lambda f: f.stat().st_mtime))
weights_files = list(weights_files)
if len(weights_files) == 0:
raise ValueError("Couldn't find any network weights (%s) to load." % ', '.join(weights_ext))
weights_preferred = list(filter(lambda f: prefer in f.name, weights_files))
weights_chosen = weights_preferred[0] if len(weights_preferred) > 0 else weights_files[0]
return weights_chosen.name


# TODO we may need to permute axes for images with channel as well
def _expand_dims(x, axes):
n_expand = len(axes) - x.ndim
assert n_expand in (0, 1, 2)
if n_expand == 0:
return x

# batch should always be first
assert axes[0] == "b"
if n_expand == 1:
return x[None]

# channel first or channel last
assert axes[1] == "c" or axes[-1] == "c"
if axes[1] == "c":
expander = np.s_[None, None]
else:
expander = np.s_[None, ..., None]
expanded = x[expander]
assert expanded.ndim == len(axes)
return expanded
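# For illustration, _expand_dims pads missing batch/channel axes like this
# (assuming channel-last network axes):
#   x = np.zeros((64, 64))            # YX image without a channel axis
#   _expand_dims(x, "byxc").shape     # (1, 64, 64, 1): batch and channel axes added
#   xc = np.zeros((64, 64, 3))        # YXC image, e.g. an H&E input
#   _expand_dims(xc, "byxc").shape    # (1, 64, 64, 3): only the batch axis added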


def _predict_tf(model_path, test_input):
# need to unzip the weights
model_weights = model_path.parent / "tf_model"
with ZipFile(model_path, "r") as f:
f.extractall(model_weights)
with tf.Session() as sess:
tf_model = tf.saved_model.load_v2(str(model_weights))
x = tf.convert_to_tensor(test_input, dtype=tf.float32)
model = tf_model.signatures["serving_default"]
y = model(x)
sess.run(tf.global_variables_initializer())
output = sess.run(y["output"])
return output


def _get_weights_and_model_metadata(outdir, model, test_input, mode, prefer_weights, min_percentile, max_percentile):

# get the path to the weights
weights_name = _get_weights_name(model, prefer_weights)
if mode == "keras_hdf5":
raise NotImplementedError("Export to keras format is not supported yet")
weight_uri = model.logdir / weights_name
elif mode == "tensorflow_saved_model_bundle":
weight_uri = model.logdir / "TF_SavedModel.zip"
model.load_weights(weights_name)
model_csbdeep = model.export_TF(weight_uri, single_output=True, upsample_grid=True)
else:
raise ValueError(f"Unsupported mode: {mode}")

# TODO: this needs more attention, e.g. how axes are treated in a general way
axes = model.config.axes.lower()
# img_axes_in = axes_check_and_normalize(axes, model.config.n_dim+1)
net_axes_in = axes
net_axes_out = axes_check_and_normalize(model._axes_out).lower()
# net_axes_lost = set(net_axes_in).difference(set(net_axes_out))
# img_axes_out = ''.join(a for a in img_axes_in if a not in net_axes_lost)

ndim_tensor = model.config.n_dim + 2

# input shape including batch size
div_by = list(model._axes_div_by(net_axes_in))

if mode == "keras_hdf5":
output_names = ("prob", "dist") + (("class_prob",) if model._is_multiclass() else ())
output_n_channels = (1, model.config.n_rays,) + ((1,) if model._is_multiclass() else ())
output_scale = [1]+list(1/g for g in model.config.grid) + [0]

elif mode == "tensorflow_saved_model_bundle":
if model._is_multiclass():
raise NotImplementedError("Tensorflow SaveModel not supported for multiclass models yet")
# output_names = ("outputall",)
# output_n_channels = (1 + model.config.n_rays,)
# output_scale = [1]*(ndim_tensor-1) + [0]

# output_names = ("prob",)
# output_n_channels = (1,)
# output_scale = [1]*(ndim_tensor-1) + [0]

input_names = model_csbdeep.input_names
# NOTE model_csbdeep.output_names returns the wrong value; this needs to be the key that is passed to signature[signature_key].outputs[key]:
# https://github.com/bioimage-io/core-bioimage-io-python/blob/main/bioimageio/core/prediction_pipeline/_model_adapters/_tensorflow_model_adapter.py#L69
# which is "output". Instead, output_names is ["concatenate_4"].
# output_names = model_csbdeep.output_names
output_names = ["output"]

output_n_channels = (1 + model.config.n_rays,)
output_scale = [1]*(ndim_tensor-1) + [0]

# TODO need config format that is compatible with deepimagej; discuss with Esti
# TODO do we need parameters for down/upsampling here?
package_data = metadata("stardist")
config = dict(
stardist=dict(
stardist_version=package_data["Version"],
thresholds=dict(nms=model.thresholds.nms, prob=model.thresholds.prob)
)
)

n_inputs = len(input_names)
assert n_inputs == 1
input_axes = "b" + net_axes_in.lower()
input_config = dict(
input_name=input_names,
input_step=[[0]+div_by] * n_inputs,
input_min_shape=[[1] + div_by] * n_inputs,
input_axes=[input_axes] * n_inputs,
input_data_range=[["-inf", "inf"]] * n_inputs,
preprocessing=[dict(scale_range=dict(
mode="per_sample",
# TODO might make it an option to normalize across channels ...
axes=net_axes_in.lower().replace("c", ""),
min_percentile=min_percentile,
max_percentile=max_percentile,
))] * n_inputs
)

n_outputs = len(output_names)
assert len(output_n_channels) == n_outputs
output_axes = "b" + net_axes_out.lower()
output_config = dict(
output_name=output_names,
output_data_range=[["-inf", "inf"]] * n_outputs,
output_axes=[output_axes] * n_outputs,
output_reference=[input_names[0]] * n_outputs,
output_scale=[output_scale] * n_outputs,
output_offset=[[1] * (ndim_tensor-1) + [n_channel] for n_channel in output_n_channels]
)

in_path = outdir / "test_input.npy"
np.save(in_path, _expand_dims(test_input, input_axes))

test_input = normalize(test_input, pmin=min_percentile, pmax=max_percentile)
if mode == "tensorflow_saved_model_bundle":
test_outputs = _predict_tf(weight_uri, _expand_dims(test_input, input_axes))
else:
test_outputs = model.predict(test_input)

out_paths = []
for i, out in enumerate(test_outputs):
p = outdir / f"test_output{i}.npy"
np.save(p, _expand_dims(out, output_axes))
out_paths.append(p)

data = dict(weight_uri=weight_uri, test_inputs=[in_path], test_outputs=out_paths, config=config)
data.update(input_config)
data.update(output_config)
return data


def export_bioimageio(
model,
outpath,
test_input,
name=None,
mode="tensorflow_saved_model_bundle",
prefer_weights="best",
min_percentile=1.0,
max_percentile=99.8,
overwrite_spec_kwargs={}
):
"""Export stardist model into bioimageio format, https://github.com/bioimage-io/spec-bioimage-io.

Parameters
----------
model: StarDist2D, StarDist3D
the model to convert
outpath: str, Path
where to save the model
test_input: np.ndarray
input image for generating test data
name: str
the name of the exported model; None uses "StarDist Model" (default: None)
mode: str
weight format to export; currently only "tensorflow_saved_model_bundle" is supported (default: "tensorflow_saved_model_bundle")
prefer_weights: str
substring used to select among the saved weight files, e.g. "best" (default: "best")
min_percentile: float
lower percentile for the input normalization preprocessing (default: 1.0)
max_percentile: float
upper percentile for the input normalization preprocessing (default: 99.8)
overwrite_spec_kwargs: dict
keyword arguments that override the automatically generated arguments passed to build_model (default: {})
"""
if build_model is None:
raise RuntimeError(
"bioimageio.core is required for modelzoo export."
"Install it via 'pip install bioimageio.core' or 'conda install -c conda-forge bioimageio.core'."
)
name = "StarDist Model" if name is None else name

outpath = Path(outpath)
if outpath.suffix == "":
outdir = outpath
zip_path = outdir / f"{name}.zip"
elif outpath.suffix == ".zip":
outdir = outpath.parent
zip_path = outpath
else:
raise ValueError(f"outpath has to be a folder or zip file, got {outpath.suffix}")
outdir.mkdir(exist_ok=True, parents=True)

kwargs = _get_stardist_metadata(outdir)
model_kwargs = _get_weights_and_model_metadata(outdir, model, test_input, mode, prefer_weights,
min_percentile=min_percentile, max_percentile=max_percentile)
kwargs.update(model_kwargs)
kwargs.update(overwrite_spec_kwargs)

build_model(name=name, output_path=zip_path, **kwargs)
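For reference, a minimal usage sketch of the new export function, following the tests added below; the pretrained model name and output filename are only examples:

from stardist.models import StarDist2D
from stardist.data import test_image_nuclei_2d
from stardist import export_bioimageio

# load a pretrained 2D model and a small test image
model = StarDist2D.from_pretrained("2D_versatile_fluo")
test_image = test_image_nuclei_2d()

# write a bioimage.io model package (zip) into the current working directory
export_bioimageio(model, "2D_versatile_fluo.zip", test_input=test_image)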
44 changes: 44 additions & 0 deletions tests/test_bioimageio.py
@@ -0,0 +1,44 @@
import pytest
import numpy as np
from stardist.models import StarDist2D
from stardist.data import test_image_nuclei_2d as _test_image

try:
from bioimageio.core.resource_tests import test_model as _test
except ImportError:
_test = None


def _test_pretrained(tmp_path, model_name, test_image):
from stardist import export_bioimageio

model = StarDist2D.from_pretrained(model_name)
assert model is not None
out_path = tmp_path / f"{model_name}.zip"
export_bioimageio(model, out_path, test_input=test_image)
assert out_path.exists()
res = _test(out_path)
# breakpoint()
assert not res["error"]


@pytest.mark.skipif(_test is None, reason="Requires bioimageio.core")
def test_pretrained_fluo(tmp_path):
test_image = _test_image()
model_name = "2D_versatile_fluo"
_test_pretrained(tmp_path, model_name, test_image)


@pytest.mark.skipif(_test is None, reason="Requires bioimageio.core")
def test_pretrained_paper(tmp_path):
test_image = _test_image()
model_name = "2D_paper_dsb2018"
_test_pretrained(tmp_path, model_name, test_image)


@pytest.mark.skipif(_test is None, reason="Requires bioimageio.core")
def test_pretrained_he(tmp_path):
test_image = _test_image()
test_image = np.concatenate([test_image[..., None]] * 3, axis=-1)
model_name = "2D_versatile_he"
_test_pretrained(tmp_path, model_name, test_image)