diff --git a/LST_AI/annotate.py b/LST_AI/annotate.py index e4d5343..210fd2c 100644 --- a/LST_AI/annotate.py +++ b/LST_AI/annotate.py @@ -114,6 +114,7 @@ def annotate_lesions(atlas_t1, atlas_mask, t1w_native, seg_native, out_atlas_war if __name__ == "__main__": + # Only for testing purposes lst_dir = os.getcwd() parent_directory = os.path.dirname(lst_dir) atlas_t1w_path = os.path.join(parent_directory, "atlas", "sub-mni152_space-mni_t1.nii.gz") diff --git a/LST_AI/custom_tf.py b/LST_AI/custom_tf.py new file mode 100644 index 0000000..0d03b5b --- /dev/null +++ b/LST_AI/custom_tf.py @@ -0,0 +1,93 @@ +import tensorflow as tf +import numpy as np + +def load_custom_model(model_path, compile=False): + """ + Loads a custom TensorFlow Keras model from the specified path. + + This function is specifically designed to handle models that originally used the + `tfa.InstanceNormalization` layer from TensorFlow Addons (tfa). Since tfa is no + longer maintained, this function replaces the `InstanceNormalization` layer with a + custom layer, `CustomGroupNormalization`, to ensure compatibility and avoid the need + for installing tfa. + + Args: + model_path (str): The file path to the saved Keras model. + compile (bool): If True, compiles the model after loading. Defaults to False. + + Returns: + tf.keras.Model: The loaded Keras model with `InstanceNormalization` layers replaced + by `CustomGroupNormalization`. + + Example: + >>> model = load_custom_model('path/to/model.h5', compile=True) + """ + custom_objects = { + 'Addons>InstanceNormalization': CustomGroupNormalization, + } + return tf.keras.models.load_model(model_path, custom_objects=custom_objects, compile=compile) + + + +class CustomGroupNormalization(tf.keras.layers.Layer): + """ + Custom Group Normalization layer for TensorFlow Keras models. 
+ + This class provides an alternative to the `tfa.InstanceNormalization` layer found in + TensorFlow Addons (tfa), which is no longer maintained and not available for MAC ARM platforms. + It facilitates the use of group normalization in models without the dependency on tfa, ensuring + compatibility and broader platform support. + + Args: + groups (int): Number of groups for Group Normalization. Default is -1. + **kwargs: Additional keyword arguments for layer configuration. + """ + def __init__(self, groups=-1, **kwargs): + # Extract necessary arguments from kwargs + self.groups = kwargs.pop('groups', -1) + self.epsilon = kwargs.pop('epsilon', 0.001) + self.center = kwargs.pop('center', True) + self.scale = kwargs.pop('scale', True) + self.beta_initializer = kwargs.pop('beta_initializer', 'zeros') + self.gamma_initializer = kwargs.pop('gamma_initializer', 'ones') + self.beta_regularizer = kwargs.pop('beta_regularizer', None) + self.gamma_regularizer = kwargs.pop('gamma_regularizer', None) + self.beta_constraint = kwargs.pop('beta_constraint', None) + self.gamma_constraint = kwargs.pop('gamma_constraint', None) + + # 'axis' argument is not used in GroupNormalization, so we remove it + kwargs.pop('axis', None) + + super(CustomGroupNormalization, self).__init__(**kwargs) + self.group_norm = tf.keras.layers.GroupNormalization( + groups=self.groups, + epsilon=self.epsilon, + center=self.center, + scale=self.scale, + beta_initializer=self.beta_initializer, + gamma_initializer=self.gamma_initializer, + beta_regularizer=self.beta_regularizer, + gamma_regularizer=self.gamma_regularizer, + beta_constraint=self.beta_constraint, + gamma_constraint=self.gamma_constraint, + **kwargs + ) + + def call(self, inputs, training=None): + return self.group_norm(inputs, training=training) + + def get_config(self): + config = super(CustomGroupNormalization, self).get_config() + config.update({ + 'groups': self.groups, + 'epsilon': self.epsilon, + 'center': self.center, + 'scale': 
self.scale, + 'beta_initializer': self.beta_initializer, + 'gamma_initializer': self.gamma_initializer, + 'beta_regularizer': self.beta_regularizer, + 'gamma_regularizer': self.gamma_regularizer, + 'beta_constraint': self.beta_constraint, + 'gamma_constraint': self.gamma_constraint + }) + return config \ No newline at end of file diff --git a/LST_AI/lst b/LST_AI/lst index 179483c..8fa259e 100755 --- a/LST_AI/lst +++ b/LST_AI/lst @@ -15,19 +15,11 @@ import tempfile import shutil import argparse -# to filter the warning: -# WARNING:root:The given value for groups will be overwritten. -import logging -class Filter(logging.Filter): - def filter(self, record): - return 'The given value for groups will be overwritten.' not in record.getMessage() - -logging.getLogger().addFilter(Filter()) - from LST_AI.strip import run_hdbet, apply_mask from LST_AI.register import mni_registration, apply_warp, rigid_reg from LST_AI.segment import unet_segmentation from LST_AI.annotate import annotate_lesions +from LST_AI.stats import compute_stats from LST_AI.utils import download_data if __name__ == "__main__": @@ -135,10 +127,10 @@ if __name__ == "__main__": os.makedirs(work_dir) # Define Image Paths (original space) - path_org_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_T1w.nii.gz') - path_org_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_FLAIR.nii.gz') - path_org_stripped_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_desc-stripped_T1w.nii.gz') - path_org_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_desc-stripped_FLAIR.nii.gz') + path_org_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_T1w.nii.gz') + path_org_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_FLAIR.nii.gz') + path_org_stripped_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_desc-stripped_T1w.nii.gz') + path_org_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_desc-stripped_FLAIR.nii.gz') # Define Image Paths (MNI space) path_mni_t1w = 
os.path.join(work_dir, 'sub-X_ses-Y_space-mni_T1w.nii.gz') @@ -147,15 +139,23 @@ if __name__ == "__main__": path_mni_stripped_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_desc-stripped_FLAIR.nii.gz') # Masks - path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-org_T1w_mask.nii.gz') - path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-org_FLAIR_mask.nii.gz') + path_orig_brainmask_t1w = os.path.join(work_dir, 'sub-X_ses-Y_space-t1w_brainmask.nii.gz') + path_orig_brainmask_flair = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_brainmask.nii.gz') path_mni_brainmask = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_brainmask.nii.gz') - # Segmentation results - path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_seg.nii.gz') - path_mni_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg.nii.gz') - path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-orig_seg-annotated.nii.gz') - path_mni_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg-annotated.nii.gz') + # Temp Segmentation results + path_orig_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_seg-lst.nii.gz') + path_mni_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_seg-lst.nii.gz') + path_orig_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-flair_desc-annotated_seg-lst.nii.gz') + path_mni_annotated_segmentation = os.path.join(work_dir, 'sub-X_ses-Y_space-mni_desc-annotated_seg-lst.nii.gz') + + # Output paths (in original space) + filename_output_segmentation = "space-flair_seg-lst.nii.gz" + filename_output_annotated_segmentation = "space-flair_desc-annotated_seg-lst.nii.gz" + + # Stats + filename_output_stats_segmentation = "lesion_stats.csv" + filename_output_stats_annotated_segmentation = "annotated_lesion_stats.csv" # affines path_affine_mni_t1w = os.path.join(work_dir, 'affine_t1w_to_mni.mat') @@ -187,6 +187,7 @@ if __name__ == "__main__": # 
Annotation only if args.annotate_only: + print("LST-AI assumes existing segmentation to be in FLAIR space.") if os.path.isfile(args.existing_seg): shutil.copy(args.existing_seg, path_orig_segmentation) else: @@ -240,7 +241,7 @@ if __name__ == "__main__": out_annotated_native=path_orig_annotated_segmentation) shutil.copy(path_orig_annotated_segmentation, - os.path.join(args.output, "space-orig_desc-annotated_seg-lst.nii.gz")) + os.path.join(args.output, filename_output_annotated_segmentation)) # Segmentation only + (opt. Annotation) @@ -283,8 +284,7 @@ if __name__ == "__main__": # move processed mask to correct naming convention hdbet_mask = path_mni_stripped_t1w.replace(".nii.gz", "_mask.nii.gz") - print(hdbet_mask) - shutil.copy(hdbet_mask, path_mni_brainmask) + shutil.move(hdbet_mask, path_mni_brainmask) # then apply brain mask to FLAIR apply_mask(input_image=path_mni_flair, @@ -333,7 +333,7 @@ if __name__ == "__main__": n_threads=args.threads) # store the segmentations - shutil.copy(path_orig_segmentation, os.path.join(args.output, "space-orig_seg-lst.nii.gz")) + shutil.copy(path_orig_segmentation, os.path.join(args.output, filename_output_segmentation)) # Annotation if not args.segment_only: @@ -354,8 +354,18 @@ if __name__ == "__main__": n_threads=args.threads) # store the segmentations - shutil.copy(path_orig_annotated_segmentation, os.path.join(args.output, "space-orig_desc-annotated_seg-lst.nii.gz")) - + shutil.copy(path_orig_annotated_segmentation, os.path.join(args.output, filename_output_annotated_segmentation)) + + # Compute Stats of (annotated) segmentation if they exist + if os.path.exists(path_orig_segmentation): + compute_stats(mask_file=path_orig_segmentation, + output_file=os.path.join(args.output, filename_output_stats_segmentation), + multi_class=False) + + if os.path.exists(path_orig_annotated_segmentation): + compute_stats(mask_file=path_orig_annotated_segmentation, + output_file=os.path.join(args.output, 
filename_output_stats_annotated_segmentation), + multi_class=True) print(f"Results in {work_dir}") if not args.temp: diff --git a/LST_AI/register.py b/LST_AI/register.py index 089da0a..74cca8d 100644 --- a/LST_AI/register.py +++ b/LST_AI/register.py @@ -106,10 +106,10 @@ def apply_warp(image_org_space, affine, origin, target, reverse=False, n_threads subprocess.run(shlex.split(warp_call), check=True) - - if __name__ == "__main__": + # Testing only + # Working directory script_dir = os.getcwd() parent_directory = os.path.dirname(script_dir) diff --git a/LST_AI/segment.py b/LST_AI/segment.py index eda4ee1..0799df3 100644 --- a/LST_AI/segment.py +++ b/LST_AI/segment.py @@ -5,9 +5,9 @@ import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf -import tensorflow_addons as tfa -#logging.getLogger("tensorflow").setLevel(logging.CRITICAL) -#logging.getLogger("tensorflow_addons").setLevel(logging.CRITICAL) + +from LST_AI.custom_tf import load_custom_model + def unet_segmentation(model_path, mni_t1, mni_flair, output_segmentation_path, device='cpu', input_shape=(192,192,192), threshold=0.5): """ @@ -99,7 +99,7 @@ def preprocess_intensities(img_arr): for i, model in enumerate(unet_mdls): with tf.device(tf_device): print(f"Running model {i}. 
") - mdl = tf.keras.models.load_model(model, compile=False) + mdl = load_custom_model(model, compile=False) img_image = np.stack([flair, t1], axis=-1) img_image = np.expand_dims(img_image, axis=0) @@ -129,7 +129,7 @@ def preprocess_intensities(img_arr): if __name__ == "__main__": - + # Testing only # Working directory script_dir = os.getcwd() parent_dir = os.path.dirname(script_dir) diff --git a/LST_AI/stats.py b/LST_AI/stats.py new file mode 100644 index 0000000..c79b413 --- /dev/null +++ b/LST_AI/stats.py @@ -0,0 +1,95 @@ +import nibabel as nib +import numpy as np +import csv +import argparse +from scipy.ndimage import label + +def compute_stats(mask_file, output_file, multi_class): + """ + Compute statistics from a lesion mask and save the results to a CSV file. + + Parameters: + mask_file (str): Path to the input mask file in NIfTI format. + output_file (str): Path to the output CSV file where results will be saved. + multi_class (bool): Flag indicating whether the mask contains multiple classes (True) or is binary (False). + + This function calculates the number of lesions, the number of voxels in lesions, and the total lesion volume. + If `multi_class` is True, these statistics are calculated for each lesion class separately. 
+ """ + # Load the mask file + mask = nib.load(mask_file) + mask_data = mask.get_fdata() + + # Voxel dimensions to calculate volume + voxel_dims = mask.header.get_zooms() + + results = [] + + if multi_class: + # Multi-class processing + lesion_labels = [1, 2, 3, 4] + label_names = { + 1: 'Periventricular', + 2: 'Juxtacortical', + 3: 'Subcortical', + 4: 'Infratentorial' + } + + for lesion_label in lesion_labels: + class_mask = mask_data == lesion_label + + # Count lesions (connected components) for each class + _ , num_lesions = label(class_mask) + + voxel_count = np.count_nonzero(class_mask) + volume = voxel_count * np.prod(voxel_dims) + + results.append({ + 'Region': label_names[lesion_label], + 'Num_Lesions': num_lesions, + 'Num_Vox': voxel_count, + 'Lesion_Volume': volume + }) + + else: + # Binary mask processing + # Assert that only two unique values are present (0 and 1) + unique_values = np.unique(mask_data) + assert len(unique_values) <= 2, "Binary mask must contain no more than two unique values." + + # Count lesions (connected components) in binary mask + _, num_lesions = label(mask_data > 0) + + voxel_count = np.count_nonzero(mask_data) + volume = voxel_count * np.prod(voxel_dims) + + results.append({ + 'Num_Lesions': num_lesions, + 'Num_Vox': voxel_count, + 'Lesion_Volume': volume + }) + + # Save results to CSV + with open(output_file, 'w', newline='') as file: + writer = csv.writer(file) + if multi_class: + writer.writerow(['Region', 'Num_Lesions', 'Num_Vox', 'Lesion_Volume']) + for result in results: + writer.writerow([result['Region'], result['Num_Lesions'], result['Num_Vox'], result['Lesion_Volume']]) + else: + writer.writerow(['Num_Lesions', 'Num_Vox', 'Lesion_Volume']) + for result in results: + writer.writerow([result['Num_Lesions'], result['Num_Vox'], result['Lesion_Volume']]) + +if __name__ == "__main__": + """ + Main entry point of the script. Parses command-line arguments and calls the compute_stats function. 
+ """ + parser = argparse.ArgumentParser(description='Process a lesion mask file.') + parser.add_argument('--in', dest='input_file', required=True, help='Input mask file path') + parser.add_argument('--out', dest='output_file', required=True, help='Output CSV file path') + parser.add_argument('--multi-class', dest='multi_class', action='store_true', help='Flag for multi-class processing') + + args = parser.parse_args() + + compute_stats(args.input_file, args.output_file, args.multi_class) diff --git a/LST_AI/strip.py b/LST_AI/strip.py index 6b731c6..9b51008 100644 --- a/LST_AI/strip.py +++ b/LST_AI/strip.py @@ -4,7 +4,22 @@ import numpy as np def run_hdbet(input_image, output_image, device, mode="accurate"): - assert mode in ["accurate","fast"], 'Unkown HD-BET mode. Please choose either "accurate" or "fast"' + """ + Runs the HD-BET tool to perform brain extraction on an input image. + + Parameters: + input_image (str): Path to the input image file. + output_image (str): Path for the output image file. + device (str): The device to use for computation, either a GPU device number or 'cpu'. + mode (str, optional): Operation mode of HD-BET. Can be 'accurate' or 'fast'. Default is 'accurate'. + + Raises: + AssertionError: If an unknown mode is provided. + + This function utilizes HD-BET, a tool for brain extraction from MRI images. Depending on the chosen mode + and device, it executes the appropriate command. + """ + assert mode in ["accurate","fast"], 'Unknown HD-BET mode. Please choose either "accurate" or "fast"' if "cpu" in str(device).lower(): bet_call = f"hd-bet -i {input_image} -device cpu -mode {mode} -tta 0 -o {output_image}" @@ -14,6 +29,18 @@ def run_hdbet(input_image, output_image, device, mode="accurate"): subprocess.run(shlex.split(bet_call), check=True) def apply_mask(input_image, mask, output_image): + """ + Applies a mask to an input image and saves the result. + + Parameters: + input_image (str): Path to the input image file. 
+ mask (str): Path to the mask image file. + output_image (str): Path for the output image file where the masked image will be saved. + + This function loads a brain mask and an input image, applies the mask to the input image, + and then saves the result. The mask and the input image are expected to be in a compatible format + and spatial alignment. + """ brain_mask_arr = nib.load(mask).get_fdata() image_nib = nib.load(input_image) image_arr = np.multiply(image_nib.get_fdata(), brain_mask_arr) diff --git a/LST_AI/utils.py b/LST_AI/utils.py index 436054b..58beb79 100644 --- a/LST_AI/utils.py +++ b/LST_AI/utils.py @@ -16,10 +16,6 @@ def download_data(path): binary_path = os.path.join(extract_path, 'binaries') model_path = os.path.join(extract_path, 'model') - # remove testing paths - # testing_path = os.path.join(extract_path, 'testing') - # paths_to_check = [atlas_path, binary_path, model_path, testing_path] - paths_to_check = [atlas_path, binary_path, model_path] # Check if all paths exist. diff --git a/cpu/Dockerfile b/cpu/Dockerfile deleted file mode 100644 index 6357cd2..0000000 --- a/cpu/Dockerfile +++ /dev/null @@ -1,112 +0,0 @@ -# Start with the base Ubuntu 22.04 LTS image -FROM ubuntu:22.04 - -# Prevents prompts from asking for user input during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update and install required packages -RUN apt-get update && apt-get install -y \ - git \ - wget \ - unzip \ - python3 \ - python3-pip - -# Setup LST-AI -RUN mkdir -p /custom_apps/lst_directory -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/CompImg/LST-AI/ -WORKDIR /custom_apps/lst_directory/LST-AI -RUN pip install -e . 
- -# Setup for greedy (Choose either pre-compiled or compilation from source) - -# There are two possible ways of obtaining 'greedy' -# Download pre-compiled version via wget -# Compile greedy from source (which requires ITK and VTK) - -# Option 1: Download pre-compiled version of greedy -# Download pre-compiled version of greedy and place in $PATH -WORKDIR /custom_apps/lst_directory -RUN wget "https://github.com/CompImg/LST-AI/releases/download/v1.0.0/greedy" && \ - chmod +x greedy && \ - mv greedy /usr/local/bin - -# Option 2: Compile greedy from source -# RUN apt-get update && apt-get install -y \ -# build-essential \ -# libpng-dev \ -# libtiff-dev \ -# uuid-dev \ -# make \ -# cmake \ -# g++ \ -# libgl1-mesa-dev - -# RUN wget https://github.com/InsightSoftwareConsortium/ITK/archive/refs/tags/v5.2.1.tar.gz && \ -# tar -zxvf v5.2.1.tar.gz && \ -# cd ITK-5.2.1 && \ -# mkdir build && \ -# cd build && \ -# cmake .. && \ -# make -j$(nproc) && \ -# make install - -# # Download and extract VTK 9.1.0 -# WORKDIR /opt -# RUN wget https://www.vtk.org/files/release/9.1/VTK-9.1.0.tar.gz && \ -# tar -xf VTK-9.1.0.tar.gz - -# # Build VTK -# WORKDIR /opt/VTK-9.1.0/build -# RUN cmake .. && \ -# make -j$(nproc) && \ -# make install - -# # Set up the VTK_DIR environment variable -# ENV VTK_DIR=/usr/local/lib/cmake/vtk-9.1 - -# Set up the directory tree and clone the greedy repository -# RUN mkdir /custom_apps -# WORKDIR /custom_apps -# RUN git clone https://github.com/pyushkevich/greedy -# RUN echo $PWD -# RUN mkdir -p greedy/build - -# # Set the build directory as the working directory -# WORKDIR /custom_apps/greedy/build -# RUN cmake .. - -# # Compile using make with the available number of CPU cores -# RUN make -j$(nproc) -# RUN make install - -# Install HD-BET -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/MIC-DKFZ/HD-BET -WORKDIR /custom_apps/lst_directory/HD-BET -RUN pip install -e . 
- -# Retrieve model weights and files for LST-AI -WORKDIR /custom_apps/lst_directory/ -RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip -WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ -RUN unzip data.zip && rm data.zip - -# Retrieve model weights for HD-BET -WORKDIR /custom_apps/lst_directory/ -RUN mkdir -p /root/hd-bet_params -RUN wget -O /root/hd-bet_params/data.zip \ - https://zenodo.org/api/records/2540695/files-archive -WORKDIR /root/hd-bet_params/ -RUN unzip data.zip && rm data.zip - -# Make directories for easily mounting data -# You may change these to your liking -RUN mkdir -p /custom_apps/lst_input -RUN mkdir -p /custom_apps/lst_output -RUN mkdir -p /custom_apps/lst_temp - -# Entrypoint to run the python script when the container starts -ENTRYPOINT [ "lst" ] \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..9fc765f --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,131 @@ +FROM nvidia/cuda:12.3.1-runtime-ubuntu22.04 + +# Prevents prompts from asking for user input during package installation +ENV DEBIAN_FRONTEND=noninteractive + +# Update and install required packages +RUN apt-get update && apt-get install -y \ + git \ + wget \ + unzip + +# copied from https://stackoverflow.com/a/76170605/3485363 +RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ + apt-get install -y software-properties-common && \ + add-apt-repository -y ppa:deadsnakes/ppa && \ + apt-get install -y python3.10 curl && \ + curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 + +RUN pip3 install --upgrade requests +RUN ln -fs /usr/bin/python3.10 /usr/bin/python +RUN python --version + +# # Setup LST-AI +RUN mkdir -p /custom_apps/lst_directory + +# Install prerequisites +# c.f. 
https://greedy.readthedocs.io/en/latest/install.html#compiling-from-source-code +RUN apt-get update && \ + apt-get install -y cmake g++ git + +# Install additional dependencies for VTK +RUN apt-get install -y libgl1-mesa-dev libxt-dev + +# Install libpng +RUN apt-get install -y libpng-dev + +# Build VTK +# Download and unpack VTK +WORKDIR /VTK +RUN git clone https://gitlab.kitware.com/vtk/vtk.git +WORKDIR /VTK/vtk +RUN git checkout v9.1.0 + +# Create and navigate to the build directory for VTK +RUN mkdir VTK-build +WORKDIR /VTK/vtk/VTK-build +# ENV LD_LIBRARY_PATH=/VTK/vtk/VTK-build:$LD_LIBRARY_PATH + +# Run CMake to configure and build VTK +RUN cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF .. +RUN make -j ${BUILD_JOBS} +RUN make install + +# Build ITK +# c.f. https://itk.org/Wiki/ITK/Getting_Started/Build/Linux +# Clone the ITK repository +RUN git clone https://github.com/InsightSoftwareConsortium/ITK.git /ITK +WORKDIR /ITK + +# Checkout the specific version +RUN git checkout v5.1.2 + +# Create and navigate to the build directory +RUN mkdir -p /ITK/build +WORKDIR /ITK/build + +# Run CMake to configure and build ITK +RUN cmake -DModule_ITKPNG=ON \ + -DITK_USE_SYSTEM_PNG=ON \ + -DBUILD_TESTING=OFF \ + -DBUILD_SHARED_LIBS=OFF \ + -DCMAKE_BUILD_TYPE=Release .. +# run build process +RUN make -j ${BUILD_JOBS} +RUN make install + +# Clone the greedy repository +RUN git clone https://github.com/pyushkevich/greedy /greedy +WORKDIR /greedy +RUN git checkout 1eafa4c6659b7a669fb299ce98d9531fc23e332a + +# Set the working directory to the build directory +WORKDIR /greedy/build + +# Run ccmake from the build directory +RUN cmake -DITK_DIR=/ITK/build \ + -DVTK_DIR=/VTK/vtk/VTK-build \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + .. +RUN make -j ${BUILD_JOBS} +RUN make install + +# Install HD-BET +WORKDIR /custom_apps/lst_directory +RUN git clone https://github.com/MIC-DKFZ/HD-BET +WORKDIR /custom_apps/lst_directory/HD-BET +RUN pip install -e . 
+ +# Retrieve model weights for HD-BET +WORKDIR /custom_apps/lst_directory/ +RUN mkdir -p /root/hd-bet_params +RUN wget -O /root/hd-bet_params/data.zip \ + https://zenodo.org/api/records/2540695/files-archive +WORKDIR /root/hd-bet_params/ +RUN unzip data.zip && rm data.zip + +WORKDIR /custom_apps/lst_directory +RUN git clone https://github.com/jqmcginnis/LST-AI/ +WORKDIR /custom_apps/lst_directory/LST-AI +RUN git pull origin main +RUN git checkout v1.1.0 + +# pip or pip3 depending on your system +RUN pip install -e . + +# Retrieve model weights and files for LST-AI +WORKDIR /custom_apps/lst_directory/ +RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ + https://github.com/CompImg/LST-AI/releases/download/v1.1.0/lst_data.zip +WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ +RUN unzip data.zip && rm data.zip + +# Make directories for easily mounting data +# You may change these to your liking +RUN mkdir -p /custom_apps/lst_input +RUN mkdir -p /custom_apps/lst_output +RUN mkdir -p /custom_apps/lst_temp + +# Entrypoint to run the python script when the container starts +ENTRYPOINT [ "lst" ] diff --git a/docker/Readme.md b/docker/Readme.md new file mode 100644 index 0000000..844aacf --- /dev/null +++ b/docker/Readme.md @@ -0,0 +1,51 @@ +#### Building the docker + +Info: We are happy to support both ARM64 and AMD64 platforms with the newest docker container. + +#### Guide on how to build the docker natively, and (tp push it to dockerhub) + +To build and push a Docker image for both linux/amd64 and linux/arm64/v8 platforms and then push it to Docker Hub under the name jqmcginnis/lst-ai, you can follow these steps: + +#### 1. Log in to dockerhub + +Open your terminal and log in to your Docker Hub account using the Docker CLI: + +```bash +docker login +``` +Enter your Docker Hub username and password when prompted. + +#### 2. Enable Buildx (if not already enabled) + +Docker Buildx is an extended build feature that supports building multi-platform images. 
To ensure it is enabled, run:
+
+```bash
+docker buildx create --use --name mybuilder
+```
+
+#### 3. Start a New Buildx Builder Instance
+
+This step ensures that the builder instance is started and uses the newly created builder:
+
+```bash
+docker buildx use mybuilder
+docker buildx inspect --bootstrap
+```
+
+#### 4. Build and Push the Image
+
+Navigate to the directory where your Dockerfile is located, then build and push the image for both platforms. Replace path/to/dockerfile with the actual path to your Dockerfile if it's not in the current directory:
+
+```bash
+docker buildx build --platform linux/amd64,linux/arm64/v8 -t jqmcginnis/lst-ai --push .
+```
+This command will build the image for amd64 and arm64/v8 architectures and push it to Docker Hub under the repository jqmcginnis/lst-ai. It may take several hours (!).
+
+#### 5. Verify the Push
+
+Once the push completes, confirm that the multi-platform image is available on Docker Hub by inspecting its manifest:
+
+```bash
+docker buildx imagetools inspect jqmcginnis/lst-ai
+```
+This command lists the image manifest for both the amd64 and arm64/v8 architectures, confirming that the push to jqmcginnis/lst-ai succeeded. 
\ No newline at end of file diff --git a/gpu/Dockerfile b/gpu/Dockerfile deleted file mode 100644 index d436a0c..0000000 --- a/gpu/Dockerfile +++ /dev/null @@ -1,122 +0,0 @@ -# Start with NVIDIA CUDA image for GPU support -FROM nvidia/cuda:12.1.0-runtime-ubuntu20.04 - -# Prevents prompts from asking for user input during package installation -ENV DEBIAN_FRONTEND=noninteractive - -# Update and install required packages -RUN apt-get update && apt-get install -y \ - git \ - wget \ - unzip \ - python3 \ - python3-pip - -RUN pip install --upgrade pip -# Install PyTorch (Choose the version compatible with the CUDA version) -RUN pip3 install torch torchvision torchaudio -# Install TensorFlow (Choose the version compatible with the CUDA version) -RUN pip3 install tensorflow-gpu==2.11.0 -RUN pip3 install tensorflow-addons==0.19.0 - -# Setup LST-AI -RUN mkdir -p /custom_apps/lst_directory -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/CompImg/LST-AI/ -WORKDIR /custom_apps/lst_directory/LST-AI -RUN pip install -e . - -# Setup for greedy (Choose either pre-compiled or compilation from source) - -# There are two possible ways of obtaining 'greedy' -# Download pre-compiled version via wget -# Compile greedy from source (which requires ITK and VTK) - -# Option 1: Download pre-compiled version of greedy -# Download pre-compiled version of greedy and place in $PATH -WORKDIR /custom_apps/lst_directory -RUN wget "https://github.com/CompImg/LST-AI/releases/download/v1.0.0/greedy" && \ - chmod +x greedy && \ - mv greedy /usr/local/bin - -# Option 2: Compile greedy from source -# RUN apt-get update && apt-get install -y \ -# build-essential \ -# libpng-dev \ -# libtiff-dev \ -# uuid-dev \ -# make \ -# cmake \ -# g++ \ -# libgl1-mesa-dev - -# RUN wget https://github.com/InsightSoftwareConsortium/ITK/archive/refs/tags/v5.2.1.tar.gz && \ -# tar -zxvf v5.2.1.tar.gz && \ -# cd ITK-5.2.1 && \ -# mkdir build && \ -# cd build && \ -# cmake .. 
&& \ -# make -j$(nproc) && \ -# make install - -# # Download and extract VTK 9.1.0 -# WORKDIR /opt -# RUN wget https://www.vtk.org/files/release/9.1/VTK-9.1.0.tar.gz && \ -# tar -xf VTK-9.1.0.tar.gz - -# # Build VTK -# WORKDIR /opt/VTK-9.1.0/build -# RUN cmake .. && \ -# make -j$(nproc) && \ -# make install - -# # Set up the VTK_DIR environment variable -# ENV VTK_DIR=/usr/local/lib/cmake/vtk-9.1 - -# Set up the directory tree and clone the greedy repository -# RUN mkdir /custom_apps -# WORKDIR /custom_apps -# RUN git clone https://github.com/pyushkevich/greedy -# RUN echo $PWD -# RUN mkdir -p greedy/build - -# # Set the build directory as the working directory -# WORKDIR /custom_apps/greedy/build -# RUN cmake .. - -# # Compile using make with the available number of CPU cores -# RUN make -j$(nproc) -# RUN make install - -# Install HD-BET -WORKDIR /custom_apps/lst_directory -RUN git clone https://github.com/MIC-DKFZ/HD-BET -WORKDIR /custom_apps/lst_directory/HD-BET -RUN pip install -e . 
- -# Install PyTorch (Choose the version compatible with the CUDA version) -RUN pip3 install torch torchvision torchaudio - -# Retrieve model weights and files for LST-AI -WORKDIR /custom_apps/lst_directory/ -RUN wget -O /custom_apps/lst_directory/LST-AI/LST_AI/data.zip \ - https://github.com/CompImg/LST-AI/releases/download/v1.0.0/lst_data.zip -WORKDIR /custom_apps/lst_directory/LST-AI/LST_AI/ -RUN unzip data.zip && rm data.zip - -# Retrieve model weights for HD-BET -WORKDIR /custom_apps/lst_directory/ -RUN mkdir -p /root/hd-bet_params -RUN wget -O /root/hd-bet_params/data.zip \ - https://zenodo.org/api/records/2540695/files-archive -WORKDIR /root/hd-bet_params/ -RUN unzip data.zip && rm data.zip - -# Make directories for easily mounting data -# You may change these to your liking -RUN mkdir -p /custom_apps/lst_input -RUN mkdir -p /custom_apps/lst_output -RUN mkdir -p /custom_apps/lst_temp - -# Entrypoint to run the python script when the container starts -ENTRYPOINT [ "lst" ] \ No newline at end of file diff --git a/setup.py b/setup.py index df5ada6..1717051 100644 --- a/setup.py +++ b/setup.py @@ -6,21 +6,20 @@ url='https://github.com/CompImg/LST-AI', author='LST-AI Team', author_email=['julian.mcginnis@tum.de', - 'tun.wiltgen@tum.de', - 'mark.muehlau@tum.de', + 'tun.wiltgen@tum.de', + 'mark.muehlau@tum.de', 'benedict.wiestler@tum.de'], - keywords=['lesion_segmentation', + keywords=['lesion_segmentation', 'ms', 'lst', 'ai'], python_requires='>=3.8', install_requires = [ - 'numpy<1.24.0', - 'pillow<10.1.0', + 'numpy<1.24.4', + 'pillow', 'scipy>=1.9.0', 'scikit-image>=0.21.0', - 'tensorflow<2.12.0', - 'tensorflow-addons<0.20.0', - 'nibabel>=4.0.0', - 'requests>=2.20.0' + 'tensorflow>=2.13', + 'nibabel', + 'requests' ], scripts=['LST_AI/lst'], license='MIT',