From b2c3efd3a6e3db4b58a45e96c1e0650cb88e8bee Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Wed, 6 Dec 2023 09:51:34 +0100 Subject: [PATCH 01/20] Update setup.py Relax version requirements --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index df5ada6..c953490 100644 --- a/setup.py +++ b/setup.py @@ -17,8 +17,8 @@ 'pillow<10.1.0', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow<2.12.0', - 'tensorflow-addons<0.20.0', + 'tensorflow', + 'tensorflow-addons', 'nibabel>=4.0.0', 'requests>=2.20.0' ], From 9469eb35549c2a1d99f4246b417ea68ca792f57a Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:01:34 +0100 Subject: [PATCH 02/20] Update setup.py --- setup.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index c953490..5609ac3 100644 --- a/setup.py +++ b/setup.py @@ -13,14 +13,14 @@ 'ms', 'lst', 'ai'], python_requires='>=3.8', install_requires = [ - 'numpy<1.24.0', - 'pillow<10.1.0', + 'numpy', + 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', 'tensorflow', 'tensorflow-addons', - 'nibabel>=4.0.0', - 'requests>=2.20.0' + 'nibabel', + 'requests' ], scripts=['LST_AI/lst'], license='MIT', From e4eb57d040e9ad9754bb04a151caa9430cc0f808 Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 14:42:39 +0100 Subject: [PATCH 03/20] Update tensorflow setup requirements --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 5609ac3..a7007f2 100644 --- a/setup.py +++ b/setup.py @@ -17,8 +17,8 @@ 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow', - 'tensorflow-addons', + 'tensorflow-gpu>=2.14', + 'tensorflow-addons>=0.23', 'nibabel', 'requests' ], From 2243f4af745bed0d24f5729fca2d6466cdc7d2e1 Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 14:51:59 +0100 Subject: [PATCH 04/20] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a7007f2..85213ad 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow-gpu>=2.14', + 'tensorflow-gpu>=2.12', 'tensorflow-addons>=0.23', 'nibabel', 'requests' From 1c05bb5ac58144e20fd82fa9bca95c6d16728f8c Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 15:29:15 +0100 Subject: [PATCH 05/20] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 85213ad..b6e7332 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow-gpu>=2.12', + 'tensorflow>=2.14', 'tensorflow-addons>=0.23', 'nibabel', 'requests' From 8fe6183de68d67e4deb18b3d91e68bcf0f4aabed Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Fri, 8 Dec 2023 17:07:46 +0100 Subject: [PATCH 06/20] update dockerfile --- cpu/Dockerfile | 55 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/cpu/Dockerfile b/cpu/Dockerfile index 6357cd2..c46b0da 100644 --- a/cpu/Dockerfile +++ b/cpu/Dockerfile @@ -1,5 +1,6 @@ # Start with the base Ubuntu 22.04 LTS image -FROM ubuntu:22.04 +# FROM ubuntu:22.04 +FROM 
nvidia/cuda:12.3.1-runtime-ubuntu20.04 # Prevents prompts from asking for user input during package installation ENV DEBIAN_FRONTEND=noninteractive @@ -8,16 +9,22 @@ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y \ git \ wget \ - unzip \ - python3 \ - python3-pip + unzip + +# copied from https://stackoverflow.com/a/76170605/3485363 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ + apt-get install -y software-properties-common && \ + add-apt-repository -y ppa:deadsnakes/ppa && \ + apt-get install -y python3.10 curl && \ + curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 + +# RUN curl -sSL https://install.python-poetry.org | python3.10 - --preview +RUN pip3 install --upgrade requests +RUN ln -fs /usr/bin/python3.10 /usr/bin/python +RUN python --version # Setup LST-AI RUN mkdir -p /custom_apps/lst_directory -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/CompImg/LST-AI/ -WORKDIR /custom_apps/lst_directory/LST-AI -RUN pip install -e . # Setup for greedy (Choose either pre-compiled or compilation from source) @@ -27,6 +34,16 @@ RUN pip install -e . # Option 1: Download pre-compiled version of greedy # Download pre-compiled version of greedy and place in $PATH + +# https://sourceforge.net/projects/greedy-reg/files/Nightly/greedy-nightly-MacOS-x86_64.dmg/download + +# for MAC use this instead? +#WORKDIR /custom_apps/lst_directory +#RUN wget "https://sourceforge.net/projects/greedy-reg/files/Nightly/greedy-nightly-MacOS-x86_64.dmg/download" -o "greedy.dmg" && \ +# chmod +x greedy && \ +# mv greedy /usr/local/bin + + WORKDIR /custom_apps/lst_directory RUN wget "https://github.com/CompImg/LST-AI/releases/download/v1.0.0/greedy" && \ chmod +x greedy && \ @@ -87,13 +104,6 @@ RUN git clone https://github.com/MIC-DKFZ/HD-BET WORKDIR /custom_apps/lst_directory/HD-BET RUN pip install -e . -# Retrieve model weights and files for LST-AI -WORKDIR /custom_apps/lst_directory/ -RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip -WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ -RUN unzip data.zip && rm data.zip - # Retrieve model weights for HD-BET WORKDIR /custom_apps/lst_directory/ RUN mkdir -p /root/hd-bet_params @@ -102,6 +112,21 @@ RUN wget -O /root/hd-bet_params/data.zip \ WORKDIR /root/hd-bet_params/ RUN unzip data.zip && rm data.zip +WORKDIR /custom_apps/lst_directory +ARG CACHEBUST=1 +RUN git clone https://github.com/jqmcginnis/LST-AI/ +WORKDIR /custom_apps/lst_directory/LST-AI +RUN git checkout 1c05bb5ac58144e20fd82fa9bca95c6d16728f8c +# pip or pip3 depending on your system +RUN pip install -e . 
+ +# Retrieve model weights and files for LST-AI +WORKDIR /custom_apps/lst_directory/ +RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ + https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip +WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ +RUN unzip data.zip && rm data.zip + # Make directories for easily mounting data # You may change these to your liking RUN mkdir -p /custom_apps/lst_input From f3274e099978b180ee384d1d74b6b3964fc07b0c Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 21:39:39 +0100 Subject: [PATCH 07/20] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b6e7332..cf72890 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ 'benedict.wiestler@tum.de'], keywords=['lesion_segmentation', 'ms', 'lst', 'ai'], - python_requires='>=3.8', + python_requires='>=3.9', install_requires = [ 'numpy', 'pillow', From 41020c63c35877a4067cb3199981609d0a316d34 Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 23:01:52 +0100 Subject: [PATCH 08/20] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index cf72890..77c1dbc 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ 'scipy>=1.9.0', 'scikit-image>=0.21.0', 'tensorflow>=2.14', - 'tensorflow-addons>=0.23', + 'tensorflow-addons', 'nibabel', 'requests' ], From da712313b35ce7f9a86fc99d31e61155e2efca29 Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Fri, 8 Dec 2023 23:08:50 +0100 Subject: [PATCH 09/20] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 77c1dbc..c9ad5bf 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow>=2.14', + 'tensorflow', 'tensorflow-addons', 'nibabel', 'requests' From c2e2c7fb2ec59d6fe87a1b2a47444ae84d8915f4 Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Tue, 12 Dec 2023 14:51:06 +0100 Subject: [PATCH 10/20] replace TFA with TF Custom Implementation --- LST_AI/segment.py | 86 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 82 insertions(+), 4 deletions(-) diff --git a/LST_AI/segment.py b/LST_AI/segment.py index eda4ee1..4f05ae0 100644 --- a/LST_AI/segment.py +++ b/LST_AI/segment.py @@ -5,9 +5,77 @@ import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf -import tensorflow_addons as tfa -#logging.getLogger("tensorflow").setLevel(logging.CRITICAL) -#logging.getLogger("tensorflow_addons").setLevel(logging.CRITICAL) + +def load_custom_model(model_path): + custom_objects = { + 'Addons>InstanceNormalization': CustomGroupNormalization, # Assuming 'InstanceNormalization' is the class name + # Add any other custom layers or objects here if needed + } + return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=False) + + +class CustomGroupNormalization(tf.keras.layers.Layer): + def __init__(self, groups=-1, **kwargs): + # Extract necessary arguments from kwargs + self.groups = kwargs.pop('groups', -1) + self.epsilon = kwargs.pop('epsilon', 0.001) + self.center = kwargs.pop('center', True) + self.scale = kwargs.pop('scale', True) + self.beta_initializer = kwargs.pop('beta_initializer', 'zeros') + self.gamma_initializer = kwargs.pop('gamma_initializer', 'ones') + 
self.beta_regularizer = kwargs.pop('beta_regularizer', None) + self.gamma_regularizer = kwargs.pop('gamma_regularizer', None) + self.beta_constraint = kwargs.pop('beta_constraint', None) + self.gamma_constraint = kwargs.pop('gamma_constraint', None) + + # 'axis' argument is not used in GroupNormalization, so we remove it + kwargs.pop('axis', None) + + super(CustomGroupNormalization, self).__init__(**kwargs) + self.group_norm = tf.keras.layers.GroupNormalization( + groups=self.groups, + epsilon=self.epsilon, + center=self.center, + scale=self.scale, + beta_initializer=self.beta_initializer, + gamma_initializer=self.gamma_initializer, + beta_regularizer=self.beta_regularizer, + gamma_regularizer=self.gamma_regularizer, + beta_constraint=self.beta_constraint, + gamma_constraint=self.gamma_constraint, + **kwargs + ) + + def call(self, inputs, training=None): + return self.group_norm(inputs, training=training) + + def get_config(self): + config = super(CustomGroupNormalization, self).get_config() + config.update({ + 'groups': self.groups, + 'epsilon': self.epsilon, + 'center': self.center, + 'scale': self.scale, + 'beta_initializer': self.beta_initializer, + 'gamma_initializer': self.gamma_initializer, + 'beta_regularizer': self.beta_regularizer, + 'gamma_regularizer': self.gamma_regularizer, + 'beta_constraint': self.beta_constraint, + 'gamma_constraint': self.gamma_constraint + }) + return config + + + + + +def replace_layer(model, custom_layer_class, layer_to_replace): + for layer in model.layers: + if isinstance(layer, layer_to_replace): + # Create the custom layer with the same configuration + new_layer = custom_layer_class(**layer.get_config()) + model._layers[model.layers.index(layer)] = new_layer + return model def unet_segmentation(model_path, mni_t1, mni_flair, output_segmentation_path, device='cpu', input_shape=(192,192,192), threshold=0.5): """ @@ -99,7 +167,17 @@ def preprocess_intensities(img_arr): for i, model in enumerate(unet_mdls): with tf.device(tf_device): print(f"Running model {i}. 
") - mdl = tf.keras.models.load_model(model, compile=False) + # mdl = tf.keras.models.load_model(model, compile=False) + # Load your model (adjust this according to how you have saved your model) + # mdl = tf.keras.models.load_model(model, compile=False) + mdl = load_custom_model(model) + + # Replace TFA Instance Normalization layers with CustomGroupNormalization + # Assume 'layer_to_replace' is the class of the TFA Instance Normalization layer + # mdl = replace_layer(model, CustomGroupNormalization, "Addons>InstanceNormalization") + + # Compile the model if necessary + # model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) img_image = np.stack([flair, t1], axis=-1) img_image = np.expand_dims(img_image, axis=0) From 99516d671fc2d118caf1b24d5a111f49b2a8294d Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Tue, 12 Dec 2023 14:52:01 +0100 Subject: [PATCH 11/20] remove tfa from setup.py --- setup.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index c9ad5bf..bb0e67f 100644 --- a/setup.py +++ b/setup.py @@ -6,10 +6,10 @@ url='https://github.com/CompImg/LST-AI', author='LST-AI Team', author_email=['julian.mcginnis@tum.de', - 'tun.wiltgen@tum.de', - 'mark.muehlau@tum.de', + 'tun.wiltgen@tum.de', + 'mark.muehlau@tum.de', 'benedict.wiestler@tum.de'], - keywords=['lesion_segmentation', + keywords=['lesion_segmentation', 'ms', 'lst', 'ai'], python_requires='>=3.9', install_requires = [ @@ -18,7 +18,6 @@ 'scipy>=1.9.0', 'scikit-image>=0.21.0', 'tensorflow', - 'tensorflow-addons', 'nibabel', 'requests' ], From c56fa98b32b0134899b77c480a27278039e9c5c2 Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Tue, 12 Dec 2023 15:19:47 +0100 Subject: [PATCH 12/20] clean up code --- LST_AI/segment.py | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/LST_AI/segment.py b/LST_AI/segment.py index 4f05ae0..de820a9 100644 --- a/LST_AI/segment.py +++ b/LST_AI/segment.py @@ -6,12 +6,11 @@ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf -def load_custom_model(model_path): +def load_custom_model(model_path, compile=False): custom_objects = { - 'Addons>InstanceNormalization': CustomGroupNormalization, # Assuming 'InstanceNormalization' is the class name - # Add any other custom layers or objects here if needed + 'Addons>InstanceNormalization': CustomGroupNormalization, } - return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=False) + return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=compile) class CustomGroupNormalization(tf.keras.layers.Layer): @@ -65,18 +64,6 @@ def get_config(self): }) return config - - - - -def replace_layer(model, custom_layer_class, layer_to_replace): - for layer in model.layers: - if isinstance(layer, layer_to_replace): - # Create the custom layer with the same configuration - new_layer = custom_layer_class(**layer.get_config()) - model._layers[model.layers.index(layer)] = new_layer - return model - def unet_segmentation(model_path, mni_t1, mni_flair, output_segmentation_path, device='cpu', input_shape=(192,192,192), threshold=0.5): """ Segment medical images using ensemble of U-Net models. @@ -167,17 +154,7 @@ def preprocess_intensities(img_arr): for i, model in enumerate(unet_mdls): with tf.device(tf_device): print(f"Running model {i}. 
") - # mdl = tf.keras.models.load_model(model, compile=False) - # Load your model (adjust this according to how you have saved your model) - # mdl = tf.keras.models.load_model(model, compile=False) - mdl = load_custom_model(model) - - # Replace TFA Instance Normalization layers with CustomGroupNormalization - # Assume 'layer_to_replace' is the class of the TFA Instance Normalization layer - # mdl = replace_layer(model, CustomGroupNormalization, "Addons>InstanceNormalization") - - # Compile the model if necessary - # model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) + mdl = load_custom_model(model, compile=False) img_image = np.stack([flair, t1], axis=-1) img_image = np.expand_dims(img_image, axis=0) From 7f8a071765c411def449c726b876fb13d6b2f3c2 Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Thu, 1 Feb 2024 10:29:26 +0100 Subject: [PATCH 13/20] statistics and documentation --- LST_AI/annotate.py | 1 + LST_AI/custom_tf.py | 93 ++++++++++++++++++++++++++++++ LST_AI/lst | 56 ++++++++++-------- LST_AI/register.py | 4 +- LST_AI/segment.py | 63 +------------------- LST_AI/stats.py | 95 ++++++++++++++++++++++++++++++ LST_AI/strip.py | 29 +++++++++- cpu/Dockerfile | 137 -------------------------------------------- docker/Dockerfile | 131 ++++++++++++++++++++++++++++++++++++++++++ docker/Readme.md | 51 +++++++++++++++++ gpu/Dockerfile | 122 --------------------------------------- 11 files changed, 437 insertions(+), 345 deletions(-) create mode 100644 LST_AI/custom_tf.py create mode 100644 LST_AI/stats.py delete mode 100644 cpu/Dockerfile create mode 100644 docker/Dockerfile create mode 100644 docker/Readme.md delete mode 100644 gpu/Dockerfile diff --git a/LST_AI/annotate.py b/LST_AI/annotate.py index e4d5343..210fd2c 100644 --- a/LST_AI/annotate.py +++ b/LST_AI/annotate.py @@ -114,6 +114,7 @@ def annotate_lesions(atlas_t1, atlas_mask, t1w_native, seg_native, out_atlas_war if __name__ == "__main__": + # Only for testing purposes lst_dir = os.getcwd() parent_directory = os.path.dirname(lst_dir) atlas_t1w_path = os.path.join(parent_directory, "atlas", "sub-mni152_space-mni_t1.nii.gz") diff --git a/LST_AI/custom_tf.py b/LST_AI/custom_tf.py new file mode 100644 index 0000000..0d03b5b --- /dev/null +++ b/LST_AI/custom_tf.py @@ -0,0 +1,93 @@ +import tensorflow as tf +import numpy as np + +def load_custom_model(model_path, compile=False): + """ + Loads a custom TensorFlow Keras model from the specified path. + + This function is specifically designed to handle models that originally used the + `tfa.InstanceNormalization` layer from TensorFlow Addons (tfa). Since tfa is no + longer maintained, this function replaces the `InstanceNormalization` layer with a + custom layer, `CustomGroupNormalization`, to ensure compatibility and avoid the need + for installing tfa. + + Args: + model_path (str): The file path to the saved Keras model. + compile (bool): If True, compiles the model after loading. Defaults to False. + + Returns: + tf.keras.Model: The loaded Keras model with `InstanceNormalization` layers replaced + by `CustomGroupNormalization`. + + Example: + >>> model = load_custom_model('path/to/model.h5', compile=True) + """ + custom_objects = { + 'Addons>InstanceNormalization': CustomGroupNormalization, + } + return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=compile) + + + +class CustomGroupNormalization(tf.keras.layers.Layer): + """ + Custom Group Normalization layer for TensorFlow Keras models. 
+ + This class provides an alternative to the `tfa.InstanceNormalization` layer found in + TensorFlow Addons (tfa), which is no longer maintained and not available for MAC ARM platforms. + It facilitates the use of group normalization in models without the dependency on tfa, ensuring + compatibility and broader platform support. + + Args: + groups (int): Number of groups for Group Normalization. Default is -1. + **kwargs: Additional keyword arguments for layer configuration. + """ + def __init__(self, groups=-1, **kwargs): + # Extract necessary arguments from kwargs + self.groups = kwargs.pop('groups', -1) + self.epsilon = kwargs.pop('epsilon', 0.001) + self.center = kwargs.pop('center', True) + self.scale = kwargs.pop('scale', True) + self.beta_initializer = kwargs.pop('beta_initializer', 'zeros') + self.gamma_initializer = kwargs.pop('gamma_initializer', 'ones') + self.beta_regularizer = kwargs.pop('beta_regularizer', None) + self.gamma_regularizer = kwargs.pop('gamma_regularizer', None) + self.beta_constraint = kwargs.pop('beta_constraint', None) + self.gamma_constraint = kwargs.pop('gamma_constraint', None) + + # 'axis' argument is not used in GroupNormalization, so we remove it + kwargs.pop('axis', None) + + super(CustomGroupNormalization, self).__init__(**kwargs) + self.group_norm = tf.keras.layers.GroupNormalization( + groups=self.groups, + epsilon=self.epsilon, + center=self.center, + scale=self.scale, + beta_initializer=self.beta_initializer, + gamma_initializer=self.gamma_initializer, + beta_regularizer=self.beta_regularizer, + gamma_regularizer=self.gamma_regularizer, + beta_constraint=self.beta_constraint, + gamma_constraint=self.gamma_constraint, + **kwargs + ) + + def call(self, inputs, training=None): + return self.group_norm(inputs, training=training) + + def get_config(self): + config = super(CustomGroupNormalization, self).get_config() + config.update({ + 'groups': self.groups, + 'epsilon': self.epsilon, + 'center': self.center, + 'scale': self.scale, + 'beta_initializer': self.beta_initializer, + 'gamma_initializer': self.gamma_initializer, + 'beta_regularizer': self.beta_regularizer, + 'gamma_regularizer': self.gamma_regularizer, + 'beta_constraint': self.beta_constraint, + 'gamma_constraint': self.gamma_constraint + }) + return config \ No newline at end of file diff --git a/LST_AI/lst b/LST_AI/lst index 179483c..c5e5f18 100755 --- a/LST_AI/lst +++ b/LST_AI/lst @@ -15,19 +15,11 @@ import tempfile import shutil import argparse -# to filter the warning: -# WARNING:root:The given value for groups will be overwritten. -import logging -class Filter(logging.Filter): - def filter(self, record): - return 'The given value for groups will be overwritten.' 
not in record.getMessage() - -logging.getLogger().addFilter(Filter()) - from LST_AI.strip import run_hdbet, apply_mask from LST_AI.register import mni_registration, apply_warp, rigid_reg from LST_AI.segment import unet_segmentation from LST_AI.annotate import annotate_lesions +from LST_AI.stats import compute_stats from LST_AI.utils import download_data if __name__ == "__main__": @@ -135,10 +127,10 @@ if __name__ == "__main__": os.makedirs(work_dir) # Define Image Paths (original space) - path_org_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_T1w.nii.gz') - path_org_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_FLAIR.nii.gz') - path_org_stripped_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_desc-stripped_T1w.nii.gz') - path_org_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_desc-stripped_FLAIR.nii.gz') + path_org_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_T1w.nii.gz') + path_org_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_FLAIR.nii.gz') + path_org_stripped_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_desc-stripped_T1w.nii.gz') + path_org_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_desc-stripped_FLAIR.nii.gz') # Define Image Paths (MNI space) path_mni_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_T1w.nii.gz') @@ -147,16 +139,24 @@ if __name__ == "__main__": path_mni_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_desc-stripped_FLAIR.nii.gz') # Masks - path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-org_T1w_mask.nii.gz') - path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-org_FLAIR_mask.nii.gz') + path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_T1w_mask.nii.gz') + path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_FLAIR_mask.nii.gz') path_mni_brainmask = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_brainmask.nii.gz') - # Segmentation results - path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_seg.nii.gz') + # Temp Segmentation results + path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg.nii.gz') path_mni_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg.nii.gz') - path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_seg-annotated.nii.gz') + path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg-annotated.nii.gz') path_mni_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg-annotated.nii.gz') + # Output paths (in original space) + filename_output_segmentation = "space-flair_seg-lst.nii.gz" + filename_output_annotated_segmentation = "space-flair_desc-annotated_seg-lst.nii.gz" + + # Stats + filename_output_stats_segmentation = "lesion_stats.csv" + filename_output_stats_annotated_segmentation = "annotated_lesion_stats.csv" + # affines path_affine_mni_t1w = os.path.join(work_dir, 'affine_t1w_to_mni.mat') path_affine_mni_flair = os.path.join(work_dir, 'affine_flair_to_mni.mat') @@ -187,6 +187,7 @@ if __name__ == "__main__": # Annotation only if args.annotate_only: + print("LST-AI assumes existing segmentation to be in FLAIR space.") if os.path.isfile(args.existing_seg): shutil.copy(args.existing_seg, path_orig_segmentation) else: @@ -240,7 +241,7 @@ if __name__ == "__main__": out_annotated_native=path_orig_annotated_segmentation) shutil.copy(path_orig_annotated_segmentation, - os.path.join(args.output, "space-orig_desc-annotated_seg-lst.nii.gz")) + 
os.path.join(args.output, filename_output_annotated_segmentation)) # Segmentation only + (opt. Annotation) @@ -283,7 +284,6 @@ if __name__ == "__main__": # move processed mask to correct naming convention hdbet_mask = path_mni_stripped_t1w.replace(".nii.gz", "_mask.nii.gz") - print(hdbet_mask) shutil.copy(hdbet_mask, path_mni_brainmask) # then apply brain mask to FLAIR @@ -333,7 +333,7 @@ if __name__ == "__main__": n_threads=args.threads) # store the segmentations - shutil.copy(path_orig_segmentation, os.path.join(args.output, "space-orig_seg-lst.nii.gz")) + shutil.copy(path_orig_segmentation, os.path.join(args.output, filename_output_segmentation)) # Annotation if not args.segment_only: @@ -354,8 +354,18 @@ if __name__ == "__main__": n_threads=args.threads) # store the segmentations - shutil.copy(path_orig_annotated_segmentation, os.path.join(args.output, "space-orig_desc-annotated_seg-lst.nii.gz")) - + shutil.copy(path_orig_annotated_segmentation, os.path.join(args.output, filename_output_annotated_segmentation)) + + # Compute Stats of (annotated) segmentation if they exist + if os.path.exists(path_orig_segmentation): + compute_stats(mask=path_orig_segmentation, + output_file=os.path.join(args.output, filename_output_stats_segmentation), + multi_class=False) + + if os.path.exists(path_orig_annotated_segmentation): + compute_stats(mask=path_orig_annotated_segmentation, + output_file=os.path.join(args.output, filename_output_stats_annotated_segmentation), + multi_class=True) print(f"Results in {work_dir}") if not args.temp: diff --git a/LST_AI/register.py b/LST_AI/register.py index 089da0a..74cca8d 100644 --- a/LST_AI/register.py +++ b/LST_AI/register.py @@ -106,10 +106,10 @@ def apply_warp(image_org_space, affine, origin, target, reverse=False, n_threads subprocess.run(shlex.split(warp_call), check=True) - - if __name__ == "__main__": + # Testing only + # Working directory script_dir = os.getcwd() parent_directory = os.path.dirname(script_dir) diff --git a/LST_AI/segment.py b/LST_AI/segment.py index de820a9..5344d55 100644 --- a/LST_AI/segment.py +++ b/LST_AI/segment.py @@ -1,68 +1,11 @@ import os import logging -logging.getLogger('tensorflow').disabled = True import nibabel as nib import numpy as np -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf -def load_custom_model(model_path, compile=False): - custom_objects = { - 'Addons>InstanceNormalization': CustomGroupNormalization, - } - return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=compile) - - -class CustomGroupNormalization(tf.keras.layers.Layer): - def __init__(self, groups=-1, **kwargs): - # Extract necessary arguments from kwargs - self.groups = kwargs.pop('groups', -1) - self.epsilon = kwargs.pop('epsilon', 0.001) - self.center = kwargs.pop('center', True) - self.scale = kwargs.pop('scale', True) - self.beta_initializer = kwargs.pop('beta_initializer', 'zeros') - self.gamma_initializer = kwargs.pop('gamma_initializer', 'ones') - self.beta_regularizer = kwargs.pop('beta_regularizer', None) - self.gamma_regularizer = kwargs.pop('gamma_regularizer', None) - self.beta_constraint = kwargs.pop('beta_constraint', None) - self.gamma_constraint = kwargs.pop('gamma_constraint', None) - - # 'axis' argument is not used in GroupNormalization, so we remove it - kwargs.pop('axis', None) - - super(CustomGroupNormalization, self).__init__(**kwargs) - self.group_norm = tf.keras.layers.GroupNormalization( - groups=self.groups, - epsilon=self.epsilon, - center=self.center, - scale=self.scale, - 
beta_initializer=self.beta_initializer, - gamma_initializer=self.gamma_initializer, - beta_regularizer=self.beta_regularizer, - gamma_regularizer=self.gamma_regularizer, - beta_constraint=self.beta_constraint, - gamma_constraint=self.gamma_constraint, - **kwargs - ) - - def call(self, inputs, training=None): - return self.group_norm(inputs, training=training) - - def get_config(self): - config = super(CustomGroupNormalization, self).get_config() - config.update({ - 'groups': self.groups, - 'epsilon': self.epsilon, - 'center': self.center, - 'scale': self.scale, - 'beta_initializer': self.beta_initializer, - 'gamma_initializer': self.gamma_initializer, - 'beta_regularizer': self.beta_regularizer, - 'gamma_regularizer': self.gamma_regularizer, - 'beta_constraint': self.beta_constraint, - 'gamma_constraint': self.gamma_constraint - }) - return config +from LST_AI.custom_tf import load_custom_model + def unet_segmentation(model_path, mni_t1, mni_flair, output_segmentation_path, device='cpu', input_shape=(192,192,192), threshold=0.5): """ @@ -184,7 +127,7 @@ def preprocess_intensities(img_arr): if __name__ == "__main__": - + # Testing only # Working directory script_dir = os.getcwd() parent_dir = os.path.dirname(script_dir) diff --git a/LST_AI/stats.py b/LST_AI/stats.py new file mode 100644 index 0000000..fbbb6fe --- /dev/null +++ b/LST_AI/stats.py @@ -0,0 +1,95 @@ +import nibabel as nib +import numpy as np +import csv +import argparse +from scipy.ndimage import label + +def compute_stats(mask_file, output_file, multi_class): + """ + Compute statistics from a lesion mask and save the results to a CSV file. + + Parameters: + mask_file (str): Path to the input mask file in NIfTI format. + output_file (str): Path to the output CSV file where results will be saved. + multi_class (bool): Flag indicating whether the mask contains multiple classes (True) or is binary (False). + + This function calculates the number of lesions, the number of voxels in lesions, and the total lesion volume. + If `multi_class` is True, these statistics are calculated for each lesion class separately. + """ + # Load the mask file + mask = nib.load(mask_file) + mask_data = mask.get_fdata() + + # Voxel dimensions to calculate volume + voxel_dims = mask.header.get_zooms() + + results = [] + + if multi_class: + # Multi-class processing + lesion_labels = [1, 2, 3, 4] + label_names = { + 1: 'Periventricular', + 2: 'Juxtacortical', + 3: 'Subcortical', + 4: 'Infratentorial' + } + + for lesion_label in lesion_labels: + class_mask = mask_data == lesion_label + + # Count lesions (connected components) for each class + _ , num_lesions = label(class_mask) + + voxel_count = np.count_nonzero(class_mask) + volume = voxel_count * np.prod(voxel_dims) + + results.append({ + 'Region': label_names[lesion_label], + 'Num_Lesions': num_lesions, + 'Num_Vox': voxel_count, + 'Lesion_Volume': volume + }) + + else: + # Binary mask processing + # Assert that only two unique values are present (0 and 1) + unique_values = np.unique(mask_data) + assert len(unique_values) <= 2, "Binary mask must contain no more than two unique values." 
+ + # Count lesions (connected components) in binary mask + _, num_lesions = label(mask_data > 0) + + voxel_count = np.count_nonzero(mask_data) + volume = voxel_count * np.prod(voxel_dims) + + results.append({ + 'Num_Lesions': num_lesions, + 'Num_Vox_Lesions': voxel_count, + 'Lesion_Volume': volume + }) + + # Save results to CSV + with open(output_file, 'w', newline='') as file: + writer = csv.writer(file) + if multi_class: + writer.writerow(['Region', 'Num_Lesions', 'Num_Vox', 'Lesion_Volume']) + for result in results: + writer.writerow([result['Region'], result['Num_Lesions'], result['Num_Vox'], result['Lesion_Volume']]) + else: + writer.writerow(['Num_Lesions', 'Num_Vox_Lesions', 'Lesion_Volume']) + for result in results: + writer.writerow([result['Num_Lesions'], result['Num_Vox_Lesions'], result['Lesion_Volume']]) + +if __name__ == "__main__": + """ + Main entry point of the script. Parses command-line arguments and calls the compute_stats function. + """ + parser = argparse.ArgumentParser(description='Process a lesion mask file.') + parser.add_argument('--in', dest='input_file', required=True, help='Input mask file path') + parser.add_argument('--out', dest='output_file', required=True, help='Output CSV file path') + parser.add_argument('--multi-class', dest='multi_class', action='store_true', help='Flag for multi-class processing') + + args = parser.parse_args() + + compute_stats(args.input_file, args.output_file, args.multi_class) diff --git a/LST_AI/strip.py b/LST_AI/strip.py index 6b731c6..9b51008 100644 --- a/LST_AI/strip.py +++ b/LST_AI/strip.py @@ -4,7 +4,22 @@ import numpy as np def run_hdbet(input_image, output_image, device, mode="accurate"): - assert mode in ["accurate","fast"], 'Unkown HD-BET mode. Please choose either "accurate" or "fast"' + """ + Runs the HD-BET tool to perform brain extraction on an input image. + + Parameters: + input_image (str): Path to the input image file. + output_image (str): Path for the output image file. + device (str): The device to use for computation, either a GPU device number or 'cpu'. + mode (str, optional): Operation mode of HD-BET. Can be 'accurate' or 'fast'. Default is 'accurate'. + + Raises: + AssertionError: If an unknown mode is provided. + + This function utilizes HD-BET, a tool for brain extraction from MRI images. Depending on the chosen mode + and device, it executes the appropriate command. + """ + assert mode in ["accurate","fast"], 'Unknown HD-BET mode. Please choose either "accurate" or "fast"' if "cpu" in str(device).lower(): bet_call = f"hd-bet -i {input_image} -device cpu -mode {mode} -tta 0 -o {output_image}" @@ -14,6 +29,18 @@ def run_hdbet(input_image, output_image, device, mode="accurate"): subprocess.run(shlex.split(bet_call), check=True) def apply_mask(input_image, mask, output_image): + """ + Applies a mask to an input image and saves the result. + + Parameters: + input_image (str): Path to the input image file. + mask (str): Path to the mask image file. + output_image (str): Path for the output image file where the masked image will be saved. + + This function loads a brain mask and an input image, applies the mask to the input image, + and then saves the result. The mask and the input image are expected to be in a compatible format + and spatial alignment. 
+ """ brain_mask_arr = nib.load(mask).get_fdata() image_nib = nib.load(input_image) image_arr = np.multiply(image_nib.get_fdata(), brain_mask_arr) diff --git a/cpu/Dockerfile b/cpu/Dockerfile deleted file mode 100644 index c46b0da..0000000 --- a/cpu/Dockerfile +++ /dev/null @@ -1,137 +0,0 @@ -# Start with the base Ubuntu 22.04 LTS image -# FROM ubuntu:22.04 -FROM nvidia/cuda:12.3.1-runtime-ubuntu20.04 - -# Prevents prompts from asking for user input during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update and install required packages -RUN apt-get update && apt-get install -y \ - git \ - wget \ - unzip - -# copied from https://stackoverflow.com/a/76170605/3485363 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ - apt-get install -y software-properties-common && \ - add-apt-repository -y ppa:deadsnakes/ppa && \ - apt-get install -y python3.10 curl && \ - curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 - -# RUN curl -sSL https://install.python-poetry.org | python3.10 - --preview -RUN pip3 install --upgrade requests -RUN ln -fs /usr/bin/python3.10 /usr/bin/python -RUN python --version - -# Setup LST-AI -RUN mkdir -p /custom_apps/lst_directory - -# Setup for greedy (Choose either pre-compiled or compilation from source) - -# There are two possible ways of obtaining 'greedy' -# Download pre-compiled version via wget -# Compile greedy from source (which requires ITK and VTK) - -# Option 1: Download pre-compiled version of greedy -# Download pre-compiled version of greedy and place in $PATH - -# https://sourceforge.net/projects/greedy-reg/files/Nightly/greedy-nightly-MacOS-x86_64.dmg/download - -# for MAC use this instead? -#WORKDIR /custom_apps/lst_directory -#RUN wget "https://sourceforge.net/projects/greedy-reg/files/Nightly/greedy-nightly-MacOS-x86_64.dmg/download" -o "greedy.dmg" && \ -# chmod +x greedy && \ -# mv greedy /usr/local/bin - - -WORKDIR /custom_apps/lst_directory -RUN wget "https://github.com/CompImg/LST-AI/releases/download/v1.0.0/greedy" && \ - chmod +x greedy && \ - mv greedy /usr/local/bin - -# Option 2: Compile greedy from source -# RUN apt-get update && apt-get install -y \ -# build-essential \ -# libpng-dev \ -# libtiff-dev \ -# uuid-dev \ -# make \ -# cmake \ -# g++ \ -# libgl1-mesa-dev - -# RUN wget https://github.com/InsightSoftwareConsortium/ITK/archive/refs/tags/v5.2.1.tar.gz && \ -# tar -zxvf v5.2.1.tar.gz && \ -# cd ITK-5.2.1 && \ -# mkdir build && \ -# cd build && \ -# cmake .. && \ -# make -j$(nproc) && \ -# make install - -# # Download and extract VTK 9.1.0 -# WORKDIR /opt -# RUN wget https://www.vtk.org/files/release/9.1/VTK-9.1.0.tar.gz && \ -# tar -xf VTK-9.1.0.tar.gz - -# # Build VTK -# WORKDIR /opt/VTK-9.1.0/build -# RUN cmake .. && \ -# make -j$(nproc) && \ -# make install - -# # Set up the VTK_DIR environment variable -# ENV VTK_DIR=/usr/local/lib/cmake/vtk-9.1 - -# Set up the directory tree and clone the greedy repository -# RUN mkdir /custom_apps -# WORKDIR /custom_apps -# RUN git clone https://github.com/pyushkevich/greedy -# RUN echo $PWD -# RUN mkdir -p greedy/build - -# # Set the build directory as the working directory -# WORKDIR /custom_apps/greedy/build -# RUN cmake .. - -# # Compile using make with the available number of CPU cores -# RUN make -j$(nproc) -# RUN make install - -# Install HD-BET -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/MIC-DKFZ/HD-BET -WORKDIR /custom_apps/lst_directory/HD-BET -RUN pip install -e . 
- -# Retrieve model weights for HD-BET -WORKDIR /custom_apps/lst_directory/ -RUN mkdir -p /root/hd-bet_params -RUN wget -O /root/hd-bet_params/data.zip \ - https://zenodo.org/api/records/2540695/files-archive -WORKDIR /root/hd-bet_params/ -RUN unzip data.zip && rm data.zip - -WORKDIR /custom_apps/lst_directory -ARG CACHEBUST=1 -RUN git clone https://github.com/jqmcginnis/LST-AI/ -WORKDIR /custom_apps/lst_directory/LST-AI -RUN git checkout 1c05bb5ac58144e20fd82fa9bca95c6d16728f8c -# pip or pip3 depending on your system -RUN pip install -e . - -# Retrieve model weights and files for LST-AI -WORKDIR /custom_apps/lst_directory/ -RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip -WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ -RUN unzip data.zip && rm data.zip - -# Make directories for easily mounting data -# You may change these to your liking -RUN mkdir -p /custom_apps/lst_input -RUN mkdir -p /custom_apps/lst_output -RUN mkdir -p /custom_apps/lst_temp - -# Entrypoint to run the python script when the container starts -ENTRYPOINT [ "lst" ] \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..267b930 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,131 @@ +FROM nvidia/cuda:12.3.1-runtime-ubuntu22.04 + +# Prevents prompts from asking for user input during package installation +ENV DEBIAN_FRONTEND=noninteractive + +# Update and install required packages +RUN apt-get update && apt-get install -y \ + git \ + wget \ + unzip + +# copied from https://stackoverflow.com/a/76170605/3485363 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ + apt-get install -y software-properties-common && \ + add-apt-repository -y ppa:deadsnakes/ppa && \ + apt-get install -y python3.10 curl && \ + curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 + +RUN pip3 install --upgrade requests +RUN ln -fs /usr/bin/python3.10 /usr/bin/python +RUN python --version + +# # Setup LST-AI +RUN mkdir -p /custom_apps/lst_directory + +# Install prerequisites +# c.f. https://greedy.readthedocs.io/en/latest/install.html#compiling-from-source-code +RUN apt-get update && \ + apt-get install -y cmake g++ git + +# Install additional dependencies for VTK +RUN apt-get install -y libgl1-mesa-dev libxt-dev + +# Install libpng +RUN apt-get install -y libpng-dev + +# Build VTK +# Download and unpack VTK +WORKDIR /VTK +RUN git clone https://gitlab.kitware.com/vtk/vtk.git +WORKDIR /VTK/vtk +RUN git checkout v9.1.0 + +# Create and navigate to the build directory for VTK +RUN mkdir VTK-build +WORKDIR /VTK/vtk/VTK-build +# ENV LD_LIBRARY_PATH=/VTK/vtk/VTK-build:$LD_LIBRARY_PATH + +# Run CMake to configure and build VTK +RUN cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF .. +RUN make -j ${BUILD_JOBS} +RUN make install + +# Build ITK +# c.f. https://itk.org/Wiki/ITK/Getting_Started/Build/Linux +# Clone the ITK repository +RUN git clone https://github.com/InsightSoftwareConsortium/ITK.git /ITK +WORKDIR /ITK + +# Checkout the specific version +RUN git checkout v5.1.2 + +# Create and navigate to the build directory +RUN mkdir -p /ITK/build +WORKDIR /ITK/build + +# Run CMake to configure and build ITK +RUN cmake -DModule_ITKPNG=ON \ + -DITK_USE_SYSTEM_PNG=ON \ + -DBUILD_TESTING=OFF \ + -DBUILD_SHARED_LIBS=OFF \ + -DCMAKE_BUILD_TYPE=Release .. 
+
+# run build process
+RUN make -j ${BUILD_JOBS}
+RUN make install
+
+# Clone the greedy repository
+RUN git clone https://github.com/pyushkevich/greedy /greedy
+WORKDIR /greedy
+RUN git checkout 1eafa4c6659b7a669fb299ce98d9531fc23e332a
+
+# Set the working directory to the build directory
+WORKDIR /greedy/build
+
+# Run cmake from the build directory
+RUN cmake -DITK_DIR=/ITK/build \
+    -DVTK_DIR=/VTK/vtk/VTK-build \
+    -DCMAKE_BUILD_TYPE=Release \
+    -DBUILD_SHARED_LIBS=OFF \
+    ..
+RUN make -j ${BUILD_JOBS}
+RUN make install
+
+# Install HD-BET
+WORKDIR /custom_apps/lst_directory
+RUN git clone https://github.com/MIC-DKFZ/HD-BET
+WORKDIR /custom_apps/lst_directory/HD-BET
+RUN pip install -e .
+
+# Retrieve model weights for HD-BET
+WORKDIR /custom_apps/lst_directory/
+RUN mkdir -p /root/hd-bet_params
+RUN wget -O /root/hd-bet_params/data.zip \
+    https://zenodo.org/api/records/2540695/files-archive
+WORKDIR /root/hd-bet_params/
+RUN unzip data.zip && rm data.zip
+
+WORKDIR /custom_apps/lst_directory
+RUN git clone https://github.com/jqmcginnis/LST-AI/
+WORKDIR /custom_apps/lst_directory/LST-AI
+RUN git pull origin main
+RUN git checkout c56fa98b32b0134899b77c480a27278039e9c5c2
+
+# pip or pip3 depending on your system
+RUN pip install -e .
+
+# Retrieve model weights and files for LST-AI
+WORKDIR /custom_apps/lst_directory/
+RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \
+    https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip
+WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/
+RUN unzip data.zip && rm data.zip
+
+# Make directories for easily mounting data
+# You may change these to your liking
+RUN mkdir -p /custom_apps/lst_input
+RUN mkdir -p /custom_apps/lst_output
+RUN mkdir -p /custom_apps/lst_temp
+
+# Entrypoint to run the python script when the container starts
+ENTRYPOINT [ "lst" ]
\ No newline at end of file
diff --git a/docker/Readme.md b/docker/Readme.md
new file mode 100644
index 0000000..844aacf
--- /dev/null
+++ b/docker/Readme.md
@@ -0,0 +1,51 @@
+#### Building the docker
+
+Info: We are happy to support both ARM64 and AMD64 platforms with the newest docker container.
+
+#### Guide on how to build the docker natively (and push it to Docker Hub)
+
+To build a Docker image for both linux/amd64 and linux/arm64/v8 platforms and push it to Docker Hub under the name jqmcginnis/lst-ai, follow these steps:
+
+#### 1. Log in to Docker Hub
+
+Open your terminal and log in to your Docker Hub account using the Docker CLI:
+
+```bash
+docker login
+```
+Enter your Docker Hub username and password when prompted.
+
+#### 2. Enable Buildx (if not already enabled)
+
+Docker Buildx is an extended build feature that supports building multi-platform images. To ensure it is enabled, run:
+
+```bash
+docker buildx create --use --name mybuilder
+```
+
+#### 3. Start a New Buildx Builder Instance
+
+This step ensures that the builder instance is started and uses the newly created builder:
+
+```bash
+docker buildx use mybuilder
+docker buildx inspect --bootstrap
+```
+
+#### 4. Build and Push the Image
+
+Navigate to the directory where your Dockerfile is located, then build and push the image for both platforms (pass the path to your build context instead of `.` if the Dockerfile lives elsewhere):
+
+```bash
+docker buildx build --platform linux/amd64,linux/arm64/v8 -t jqmcginnis/lst-ai --push .
+```
+This command will build the image for amd64 and arm64/v8 architectures and push it to Docker Hub under the repository jqmcginnis/lst-ai. It may take several hours (!).
+
+#### 5. Verify the Push
+
+Once the build has finished, confirm that the multi-platform image is available on Docker Hub, either on the repository page or by inspecting the pushed manifest from the command line:
+
+```bash
+docker buildx imagetools inspect jqmcginnis/lst-ai
+```
+The output should list manifests for both the linux/amd64 and linux/arm64/v8 platforms.
\ No newline at end of file
diff --git a/gpu/Dockerfile b/gpu/Dockerfile
deleted file mode 100644
index d436a0c..0000000
--- a/gpu/Dockerfile
+++ /dev/null
@@ -1,122 +0,0 @@
-# Start with NVIDIA CUDA image for GPU support
-FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04
-
-# Prevents prompts from asking for user input during package installation
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Update and install required packages
-RUN apt-get update && apt-get install -y \
-    git \
-    wget \
-    unzip \
-    python3 \
-    python3-pip
-
-RUN pip install --upgrade pip
-# Install PyTorch (Choose the version compatible with the CUDA version)
-RUN pip3 install torch torchvision torchaudio
-# Install TensorFlow (Choose the version compatible with the CUDA version)
-RUN pip3 install tensorflow-gpu==2.11.0
-RUN pip3 install tensorflow-addons==0.19.0
-
-# Setup LST-AI
-RUN mkdir -p /custom_apps/lst_directory
-WORKDIR /custom_apps/lst_directory
-RUN git clone https://github.com/CompImg/LST-AI/
-WORKDIR /custom_apps/lst_directory/LST-AI
-RUN pip install -e .
-
-# Setup for greedy (Choose either pre-compiled or compilation from source)
-
-# There are two possible ways of obtaining 'greedy'
-# Download pre-compiled version via wget
-# Compile greedy from source (which requires ITK and VTK)
-
-# Option 1: Download pre-compiled version of greedy
-# Download pre-compiled version of greedy and place in $PATH
-WORKDIR /custom_apps/lst_directory
-RUN wget "https://github.com/CompImg/LST-AI/releases/download/v1.0.0/greedy" && \
-    chmod +x greedy && \
-    mv greedy /usr/local/bin
-
-# Option 2: Compile greedy from source
-# RUN apt-get update && apt-get install -y \
-# build-essential \
-# libpng-dev \
-# libtiff-dev \
-# uuid-dev \
-# make \
-# cmake \
-# g++ \
-# libgl1-mesa-dev
-
-# RUN wget https://github.com/InsightSoftwareConsortium/ITK/archive/refs/tags/v5.2.1.tar.gz && \
-# tar -zxvf v5.2.1.tar.gz && \
-# cd ITK-5.2.1 && \
-# mkdir build && \
-# cd build && \
-# cmake .. && \
-# make -j$(nproc) && \
-# make install
-
-# # Download and extract VTK 9.1.0
-# WORKDIR /opt
-# RUN wget https://www.vtk.org/files/release/9.1/VTK-9.1.0.tar.gz && \
-# tar -xf VTK-9.1.0.tar.gz
-
-# # Build VTK
-# WORKDIR /opt/VTK-9.1.0/build
-# RUN cmake .. && \
-# make -j$(nproc) && \
-# make install
-
-# # Set up the VTK_DIR environment variable
-# ENV VTK_DIR=/usr/local/lib/cmake/vtk-9.1
-
-# Set up the directory tree and clone the greedy repository
-# RUN mkdir /custom_apps
-# WORKDIR /custom_apps
-# RUN git clone https://github.com/pyushkevich/greedy
-# RUN echo $PWD
-# RUN mkdir -p greedy/build
-
-# # Set the build directory as the working directory
-# WORKDIR /custom_apps/greedy/build
-# RUN cmake ..
- -# # Compile using make with the available number of CPU cores -# RUN make -j$(nproc) -# RUN make install - -# Install HD-BET -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/MIC-DKFZ/HD-BET -WORKDIR /custom_apps/lst_directory/HD-BET -RUN pip install -e . - -# Install PyTorch (Choose the version compatible with the CUDA version) -RUN pip3 install torch torchvision torchaudio - -# Retrieve model weights and files for LST-AI -WORKDIR /custom_apps/lst_directory/ -RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip -WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ -RUN unzip data.zip && rm data.zip - -# Retrieve model weights for HD-BET -WORKDIR /custom_apps/lst_directory/ -RUN mkdir -p /root/hd-bet_params -RUN wget -O /root/hd-bet_params/data.zip \ - https://zenodo.org/api/records/2540695/files-archive -WORKDIR /root/hd-bet_params/ -RUN unzip data.zip && rm data.zip - -# Make directories for easily mounting data -# You may change these to your liking -RUN mkdir -p /custom_apps/lst_input -RUN mkdir -p /custom_apps/lst_output -RUN mkdir -p /custom_apps/lst_temp - -# Entrypoint to run the python script when the container starts -ENTRYPOINT [ "lst" ] \ No newline at end of file From 8e2e96aba0133303cf59e285ce5d7042746d6882 Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Thu, 1 Feb 2024 10:33:54 +0100 Subject: [PATCH 14/20] remove comments --- LST_AI/utils.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/LST_AI/utils.py b/LST_AI/utils.py index 436054b..58beb79 100644 --- a/LST_AI/utils.py +++ b/LST_AI/utils.py @@ -16,10 +16,6 @@ def download_data(path): binary_path = os.path.join(extract_path, 'binaries') model_path = os.path.join(extract_path, 'model') - # remove testing paths - # testing_path = os.path.join(extract_path, 'testing') - # paths_to_check = [atlas_path, binary_path, model_path, testing_path] - paths_to_check = [atlas_path, binary_path, model_path] # Check if all paths exist. 
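
As a side note to the statistics interface introduced in PATCH 13: `LST_AI/stats.py` also exposes a stand-alone argparse entry point, so the CSVs can be regenerated from an existing mask without rerunning the whole pipeline. A minimal sketch — the mask and CSV paths are placeholders, while `--in`, `--out`, and `--multi-class` are the flags defined in the script:

```bash
# Binary lesion mask -> lesion count, voxel count, and total lesion volume
python LST_AI/stats.py --in space-flair_seg-lst.nii.gz --out lesion_stats.csv

# Annotated mask -> per-region stats (periventricular, juxtacortical, subcortical, infratentorial)
python LST_AI/stats.py --in space-flair_desc-annotated_seg-lst.nii.gz --out annotated_lesion_stats.csv --multi-class
```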
From 6ba37b0508c959abd3eea23ec339d5d72de03bbb Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Thu, 1 Feb 2024 11:10:49 +0100 Subject: [PATCH 15/20] bugfix and tf logging disabled --- LST_AI/lst | 4 ++-- LST_AI/segment.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/LST_AI/lst b/LST_AI/lst index c5e5f18..c7207f5 100755 --- a/LST_AI/lst +++ b/LST_AI/lst @@ -358,12 +358,12 @@ if __name__ == "__main__": # Compute Stats of (annotated) segmentation if they exist if os.path.exists(path_orig_segmentation): - compute_stats(mask=path_orig_segmentation, + compute_stats(mask_file=path_orig_segmentation, output_file=os.path.join(args.output, filename_output_stats_segmentation), multi_class=False) if os.path.exists(path_orig_annotated_segmentation): - compute_stats(mask=path_orig_annotated_segmentation, + compute_stats(mask_file=path_orig_annotated_segmentation, output_file=os.path.join(args.output, filename_output_stats_annotated_segmentation), multi_class=True) diff --git a/LST_AI/segment.py b/LST_AI/segment.py index 5344d55..0799df3 100644 --- a/LST_AI/segment.py +++ b/LST_AI/segment.py @@ -1,7 +1,9 @@ import os import logging +logging.getLogger('tensorflow').disabled = True import nibabel as nib import numpy as np +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf from LST_AI.custom_tf import load_custom_model From 0c0ba39d6c0f3e75e5f692f73caf0c1026bcc4dc Mon Sep 17 00:00:00 2001 From: Julian McGinnis Date: Thu, 1 Feb 2024 11:30:05 +0100 Subject: [PATCH 16/20] update path identifiers --- LST_AI/lst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LST_AI/lst b/LST_AI/lst index c7207f5..bf47c1e 100755 --- a/LST_AI/lst +++ b/LST_AI/lst @@ -139,8 +139,8 @@ if __name__ == "__main__": path_mni_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_desc-stripped_FLAIR.nii.gz') # Masks - path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_T1w_mask.nii.gz') - path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_FLAIR_mask.nii.gz') + path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_brainmask.nii.gz') + path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_brainmask.nii.gz') path_mni_brainmask = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_brainmask.nii.gz') # Temp Segmentation results @@ -284,7 +284,7 @@ if __name__ == "__main__": # move processed mask to correct naming convention hdbet_mask = path_mni_stripped_t1w.replace(".nii.gz", "_mask.nii.gz") - shutil.copy(hdbet_mask, path_mni_brainmask) + shutil.move(hdbet_mask, path_mni_brainmask) # then apply brain mask to FLAIR apply_mask(input_image=path_mni_flair, From 8ee82ad194ceba2dbbd74c7aeeabe2d234b5a6c5 Mon Sep 17 00:00:00 2001 From: twiltgen Date: Mon, 12 Feb 2024 10:42:06 +0100 Subject: [PATCH 17/20] adjust np and tf requirements --- setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index bb0e67f..1717051 100644 --- a/setup.py +++ b/setup.py @@ -11,13 +11,13 @@ 'benedict.wiestler@tum.de'], keywords=['lesion_segmentation', 'ms', 'lst', 'ai'], - python_requires='>=3.9', + python_requires='>=3.8', install_requires = [ - 'numpy', + 'numpy<1.24.4', 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow', + 'tensorflow>=2.13', 'nibabel', 'requests' ], From e026a1f0d0aeef34fd9898f163b0980a8c6d0eee Mon Sep 17 00:00:00 2001 From: twiltgen Date: Mon, 12 Feb 2024 12:59:20 +0100 Subject: [PATCH 18/20] rename segmentation temp files --- LST_AI/lst 
| 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/LST_AI/lst b/LST_AI/lst index bf47c1e..8fa259e 100755 --- a/LST_AI/lst +++ b/LST_AI/lst @@ -144,10 +144,10 @@ if __name__ == "__main__": path_mni_brainmask = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_brainmask.nii.gz') # Temp Segmentation results - path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg.nii.gz') - path_mni_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg.nii.gz') - path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg-annotated.nii.gz') - path_mni_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg-annotated.nii.gz') + path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg-lst.nii.gz') + path_mni_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg-lst.nii.gz') + path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_desc-annotated_seg-lst.nii.gz') + path_mni_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_desc-annotated_seg-lst.nii.gz') # Output paths (in original space) filename_output_segmentation = "space-flair_seg-lst.nii.gz" From f3b53f3758d06faceafad94c44dccb5180f236ae Mon Sep 17 00:00:00 2001 From: twiltgen Date: Thu, 15 Feb 2024 09:48:32 +0100 Subject: [PATCH 19/20] rename stats columns --- LST_AI/stats.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/LST_AI/stats.py b/LST_AI/stats.py index fbbb6fe..c79b413 100644 --- a/LST_AI/stats.py +++ b/LST_AI/stats.py @@ -65,7 +65,7 @@ def compute_stats(mask_file, output_file, multi_class): results.append({ 'Num_Lesions': num_lesions, - 'Num_Vox_Lesions': voxel_count, + 'Num_Vox': voxel_count, 'Lesion_Volume': volume }) @@ -77,9 +77,9 @@ def compute_stats(mask_file, output_file, multi_class): for result in results: writer.writerow([result['Region'], result['Num_Lesions'], result['Num_Vox'], result['Lesion_Volume']]) else: - writer.writerow(['Num_Lesions', 'Num_Vox_Lesions', 'Lesion_Volume']) + writer.writerow(['Num_Lesions', 'Num_Vox', 'Lesion_Volume']) for result in results: - writer.writerow([result['Num_Lesions'], result['Num_Vox_Lesions'], result['Lesion_Volume']]) + writer.writerow([result['Num_Lesions'], result['Num_Vox'], result['Lesion_Volume']]) if __name__ == "__main__": """ From 5011d0723bf9be0a523cf7b3a6424456db675143 Mon Sep 17 00:00:00 2001 From: Julian McGinnis <33037028+jqmcginnis@users.noreply.github.com> Date: Thu, 15 Feb 2024 09:49:45 +0100 Subject: [PATCH 20/20] Update Dockerfile release tags --- docker/Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 267b930..9fc765f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -109,7 +109,7 @@ WORKDIR /custom_apps/lst_directory RUN git clone https://github.com/jqmcginnis/LST-AI/ WORKDIR /custom_apps/lst_directory/LST-AI RUN git pull origin main -RUN git checkout c56fa98b32b0134899b77c480a27278039e9c5c2 +RUN git checkout v1.1.0 # pip or pip3 depending on your system RUN pip install -e . @@ -117,7 +117,7 @@ RUN pip install -e . 
# Retrieve model weights and files for LST-AI WORKDIR /custom_apps/lst_directory/ RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip + https://github.com/CompImg/LST-AI/releases/download/v1.1.0/lst_data.zip WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ RUN unzip data.zip && rm data.zip @@ -128,4 +128,4 @@ RUN mkdir -p /custom_apps/lst_output RUN mkdir -p /custom_apps/lst_temp # Entrypoint to run the python script when the container starts -ENTRYPOINT [ "lst" ] \ No newline at end of file +ENTRYPOINT [ "lst" ]
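
With the series applied and the image built as described in docker/Readme.md, a containerized run mounts the three directories created at the end of the Dockerfile. A minimal sketch — the image name follows the Readme, the mount points follow the Dockerfile, and the `--t1`/`--flair`/`--output`/`--temp`/`--device` flag names are assumed to match the argument parser of the `lst` script (only `args.output`, `args.temp`, and the device handling are visible in these hunks):

```bash
# GPU run; on machines without an NVIDIA runtime, drop --gpus all
# (and, assuming the lst CLI accepts it, pass --device cpu)
docker run --rm --gpus all \
    -v /path/to/inputs:/custom_apps/lst_input \
    -v /path/to/results:/custom_apps/lst_output \
    -v /path/to/scratch:/custom_apps/lst_temp \
    jqmcginnis/lst-ai \
    --t1 /custom_apps/lst_input/t1.nii.gz \
    --flair /custom_apps/lst_input/flair.nii.gz \
    --output /custom_apps/lst_output \
    --temp /custom_apps/lst_temp
```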