From ece27351f9a233299203f77ecfcc6e148e851ba9 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 12 Mar 2023 19:53:32 +0000 Subject: [PATCH 01/59] adds a flow data generator specific for segmentation tasks, and a image_encoder that includes image preprocessing utilities. --- utilities/segmentation_utils/__init__.py | 0 utilities/segmentation_utils/flowreader.py | 132 ++++++++++++++++++ utilities/segmentation_utils/image_encoder.py | 65 +++++++++ 3 files changed, 197 insertions(+) create mode 100644 utilities/segmentation_utils/__init__.py create mode 100644 utilities/segmentation_utils/flowreader.py create mode 100644 utilities/segmentation_utils/image_encoder.py diff --git a/utilities/segmentation_utils/__init__.py b/utilities/segmentation_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py new file mode 100644 index 0000000..0ab50e9 --- /dev/null +++ b/utilities/segmentation_utils/flowreader.py @@ -0,0 +1,132 @@ +import os +from keras.preprocessing.image import ImageDataGenerator +from utilities.segmentation_utils.image_encoder import ImagePreprocessor +#! 
important: as the I have no clue how we can mount this repo as a package the import is relative to the working directory + + + +class FlowGenerator: + def __init__( + self, + image_path, + mask_path, + image_size, + num_classes, + shuffle=True, + batch_size=32, + ): + """ + Initializes the flow generator object + + Parameters: + ---------- + image (string): path to the image directory + mask (string): path to the mask directory + batch_size (int): batch size + image_size (tuple): image size + num_classes (int): number of classes + shuffle (bool): whether to shuffle the dataset or not + + Returns: + ------- + None + """ + + self.image_path = image_path + self.mask_path = mask_path + self.batch_size = batch_size + self.image_size = image_size + self.num_classes = num_classes + self.shuffle = shuffle + self.__make_generator() + print("Reading images from: ", self.image_path) + + def get_dataset_size(self): + """ + Returns the length of the dataset + + Parameters: + ---------- + None + + Returns: + ------- + int: length of the dataset + + """ + + return len(os.listdir(os.path.join(self.image_path, "img"))) + + def __make_generator(self): + """ + Creates the generator + + Parameters: + ---------- + None + + Returns: + ------- + None + + """ + seed = 909 + image_datagen = ImageDataGenerator() + mask_datagen = ImageDataGenerator() + + image_generator = image_datagen.flow_from_directory( + self.image_path, + class_mode=None, + seed=seed, + batch_size=self.batch_size, + target_size=self.image_size, + ) + + mask_generator = mask_datagen.flow_from_directory( + self.mask_path, + class_mode=None, + seed=seed, + batch_size=self.batch_size, + target_size=(self.image_size[0] // 2 * self.image_size[1] // 2, 1), + color_mode="grayscale", + ) + + self.train_generator = zip(image_generator, mask_generator) + self.train_generator = self.preprocess(self.train_generator) + + def get_generator(self): + """ + Returns the generator + + Parameters: + ---------- + None + + Returns: + ------- 
+ generator: generator object + + """ + return self.train_generator + + def preprocess(self, generator_zip): + """ + Preprocessor function to augments the images and masks and onehot encodes the masks + + Parameters: + ---------- + generator_zip (tuple): tuple of image and mask generator + + Returns: + ------- + generator: generator batch + """ + for (img, mask) in generator_zip: + for i in range(len(img)): + img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( + img[i], mask[i], self.image_size + ) + mask = ImagePreprocessor.onehot_encode( + mask, self.image_size, self.num_classes + ) + yield (img, mask) diff --git a/utilities/segmentation_utils/image_encoder.py b/utilities/segmentation_utils/image_encoder.py new file mode 100644 index 0000000..b37d1d5 --- /dev/null +++ b/utilities/segmentation_utils/image_encoder.py @@ -0,0 +1,65 @@ +import numpy as np +import tensorflow as tf + + +class ImagePreprocessor: + @classmethod + def onehot_encode(self, masks, image_size, num_classes): + """ + Onehot encodes the images coming from the image generator object + + Parameters: + ---------- + masks (tf tensor): masks to be onehot encoded + + Returns: + ------- + encoded (tf tensor): onehot encoded masks + """ + encoded = np.zeros( + (masks.shape[0], image_size[0] // 2 * image_size[1] // 2, num_classes) + ) + for i in range(num_classes): + encoded[:, :, i] = tf.squeeze((masks == i).astype(int)) + + return encoded + + @classmethod + def augmentation_pipeline(self, image, mask, input_size, channels=3): + """ + Applies augmentation pipeline to the image and mask + + Parameters: + ---------- + image (tf tensor): image to be augmented + mask (tf tensor): mask to be augmented + input_size (tuple): size of the input image + + Returns: + ------- + image (tf tensor): augmented image + mask (tf tensor): augmented mask + """ + + input_size = (input_size[0], input_size[1], channels) + + seed = np.random.randint(0, 100000) + + image = tf.image.random_flip_left_right(image, 
seed=seed) + image = tf.image.random_flip_up_down(image, seed=seed) + image = tf.image.random_brightness(image, 0.2, seed=seed) + image = tf.image.random_contrast(image, 0.8, 1.2, seed=seed) + image = tf.image.random_saturation(image, 0.8, 1.2, seed=seed) + image = tf.image.random_hue(image, 0.2, seed=seed) + + + mask = tf.image.random_flip_left_right(mask, seed=seed) + mask = tf.image.random_flip_up_down(mask, seed=seed) + + + return image, mask + + @classmethod + def flatten(self, image, input_size, channels=1): + return tf.reshape(image, (input_size[0] * input_size[1], channels)) + From 70186e198fd1287bb94c92bee8c95afe9d49718e Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 12 Mar 2023 20:38:31 +0000 Subject: [PATCH 02/59] add instructions to readme on installation --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index e69de29..f89c5eb 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,17 @@ +Note before installation: None of these commands have been properly tested. Make sure you installed the package in a virtual environment. + +For installing the utilities repo as a package use the following commands in the terminal: +Note: you need to have a working ssh key to access github from your current machine. + +``` +pip install git+ssh://git@github.com:guorbit/utilities.git + +``` + + +Alternatively the following command can be used to install a git repo AFTER cloning it: +Note: the path below has to be modified to point to the package directory. 
+``` +pip install git+file:///path/to/your/package#egg=package-name + +``` From 31746d6c79553fbf60873e0e7b8747f230663ed9 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 12 Mar 2023 22:59:44 +0000 Subject: [PATCH 03/59] updates gitignore to cover python cache files, adds image onehot encoder test to be ran by pytest --- .gitignore | 137 ++++++++++++++++++ .../image_encoder_test.py | 25 ++++ 2 files changed, 162 insertions(+) create mode 100644 tests/segmentation_utils_tests.py/image_encoder_test.py diff --git a/.gitignore b/.gitignore index e69de29..164719c 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,137 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ +/archive/ +/archive_resized/ +/checkpoints/ +/exported_models/ +archive.lnk + +colab_notebook.ipynb + diff --git a/tests/segmentation_utils_tests.py/image_encoder_test.py b/tests/segmentation_utils_tests.py/image_encoder_test.py new file mode 100644 index 0000000..8da7757 --- /dev/null +++ b/tests/segmentation_utils_tests.py/image_encoder_test.py @@ -0,0 +1,25 @@ +from utilities.segmentation_utils.image_encoder import ImagePreprocessor +import numpy as np + +def test_image_onehot_encoder()->None: + # predifining input variables + + n_classes = 2 + batch_size = 1 + image_size = (256, 256) + + # creating a mask with 2 classes + mask = np.zeros((batch_size,image_size[0]//2 * image_size[1]//2)) + mask[:,::2] = 1 + + # creating a onehot mask to compare with the output of the function + onehot_test = np.zeros((batch_size,image_size[0]//2 * image_size[1]//2,n_classes)) + onehot_test[:,::2,1] = 1 + onehot_test[:,1::2,0] = 1 + + one_hot_image = ImagePreprocessor.onehot_encode(mask,image_size,n_classes) + + assert one_hot_image.shape == (1, image_size[0]//2 * image_size[1]//2, n_classes) + assert np.array_equal(one_hot_image,onehot_test) + + \ No newline at end of file From 0b247a6bed1992937395f312418637958e8b05e0 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Mon, 13 Mar 2023 00:06:00 +0000 
Subject: [PATCH 04/59] separates seed variables out of flowreader.py and image_encoder.py --- utilities/segmentation_utils/flowreader.py | 12 ++++++++---- utilities/segmentation_utils/image_encoder.py | 3 +-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 0ab50e9..b98b66f 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -1,4 +1,5 @@ import os +import numpy as np from keras.preprocessing.image import ImageDataGenerator from utilities.segmentation_utils.image_encoder import ImagePreprocessor #! important: as the I have no clue how we can mount this repo as a package the import is relative to the working directory @@ -14,6 +15,7 @@ def __init__( num_classes, shuffle=True, batch_size=32, + seed=909, ): """ Initializes the flow generator object @@ -38,6 +40,7 @@ def __init__( self.image_size = image_size self.num_classes = num_classes self.shuffle = shuffle + self.seed = seed self.__make_generator() print("Reading images from: ", self.image_path) @@ -70,14 +73,14 @@ def __make_generator(self): None """ - seed = 909 + image_datagen = ImageDataGenerator() mask_datagen = ImageDataGenerator() image_generator = image_datagen.flow_from_directory( self.image_path, class_mode=None, - seed=seed, + seed=self.seed, batch_size=self.batch_size, target_size=self.image_size, ) @@ -85,7 +88,7 @@ def __make_generator(self): mask_generator = mask_datagen.flow_from_directory( self.mask_path, class_mode=None, - seed=seed, + seed=self.seed, batch_size=self.batch_size, target_size=(self.image_size[0] // 2 * self.image_size[1] // 2, 1), color_mode="grayscale", @@ -123,8 +126,9 @@ def preprocess(self, generator_zip): """ for (img, mask) in generator_zip: for i in range(len(img)): + image_seed = np.random.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size + img[i], 
mask[i], self.image_size,seed=image_seed ) mask = ImagePreprocessor.onehot_encode( mask, self.image_size, self.num_classes diff --git a/utilities/segmentation_utils/image_encoder.py b/utilities/segmentation_utils/image_encoder.py index b37d1d5..7a0c388 100644 --- a/utilities/segmentation_utils/image_encoder.py +++ b/utilities/segmentation_utils/image_encoder.py @@ -25,7 +25,7 @@ def onehot_encode(self, masks, image_size, num_classes): return encoded @classmethod - def augmentation_pipeline(self, image, mask, input_size, channels=3): + def augmentation_pipeline(self, image, mask, input_size, channels=3,seed=0): """ Applies augmentation pipeline to the image and mask @@ -43,7 +43,6 @@ def augmentation_pipeline(self, image, mask, input_size, channels=3): input_size = (input_size[0], input_size[1], channels) - seed = np.random.randint(0, 100000) image = tf.image.random_flip_left_right(image, seed=seed) image = tf.image.random_flip_up_down(image, seed=seed) From 57435b39a9820ffd393a3cf8cbc972b3b15f897d Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Mon, 13 Mar 2023 13:55:04 +0000 Subject: [PATCH 05/59] changes the package name from image_encoder to ImagePreprocessor, removes inner class. 
--- .../flow_reader_test.py | 6 ++ ...der_test.py => image_preprocessor_test.py} | 2 +- .../segmentation_utils/ImagePreprocessor.py | 51 +++++++++++++++ utilities/segmentation_utils/flowreader.py | 13 ++-- utilities/segmentation_utils/image_encoder.py | 64 ------------------- 5 files changed, 65 insertions(+), 71 deletions(-) create mode 100644 tests/segmentation_utils_tests.py/flow_reader_test.py rename tests/segmentation_utils_tests.py/{image_encoder_test.py => image_preprocessor_test.py} (88%) create mode 100644 utilities/segmentation_utils/ImagePreprocessor.py delete mode 100644 utilities/segmentation_utils/image_encoder.py diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py new file mode 100644 index 0000000..4a59450 --- /dev/null +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -0,0 +1,6 @@ +import os +from utilities.segmentation_utils.flowreader import FlowGenerator +from pytest import MonkeyPatch + +def test_flow_generator() -> None: + pass \ No newline at end of file diff --git a/tests/segmentation_utils_tests.py/image_encoder_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py similarity index 88% rename from tests/segmentation_utils_tests.py/image_encoder_test.py rename to tests/segmentation_utils_tests.py/image_preprocessor_test.py index 8da7757..3e19040 100644 --- a/tests/segmentation_utils_tests.py/image_encoder_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -1,4 +1,4 @@ -from utilities.segmentation_utils.image_encoder import ImagePreprocessor +from utilities.segmentation_utils import ImagePreprocessor import numpy as np def test_image_onehot_encoder()->None: diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py new file mode 100644 index 0000000..c475692 --- /dev/null +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -0,0 +1,51 @@ +import numpy as 
np +import tensorflow as tf + + +def onehot_encode(masks, image_size, num_classes): + """ + Onehot encodes the images coming from the image generator object + Parameters: + ---------- + masks (tf tensor): masks to be onehot encoded + Returns: + ------- + encoded (tf tensor): onehot encoded masks + """ + encoded = np.zeros( + (masks.shape[0], image_size[0] // 2 * image_size[1] // 2, num_classes) + ) + for i in range(num_classes): + encoded[:, :, i] = tf.squeeze((masks == i).astype(int)) + return encoded + + +def augmentation_pipeline(image, mask, input_size, channels=3, seed=0): + """ + Applies augmentation pipeline to the image and mask + Parameters: + ---------- + image (tf tensor): image to be augmented + mask (tf tensor): mask to be augmented + input_size (tuple): size of the input image + Returns: + ------- + image (tf tensor): augmented image + mask (tf tensor): augmented mask + """ + input_size = (input_size[0], input_size[1], channels) + image = tf.image.random_flip_left_right(image, seed=seed) + image = tf.image.random_flip_up_down(image, seed=seed) + image = tf.image.random_brightness(image, 0.2, seed=seed) + image = tf.image.random_contrast(image, 0.8, 1.2, seed=seed) + image = tf.image.random_saturation(image, 0.8, 1.2, seed=seed) + image = tf.image.random_hue(image, 0.2, seed=seed) + mask = tf.image.random_flip_left_right(mask, seed=seed) + mask = tf.image.random_flip_up_down(mask, seed=seed) + return image, mask + + +def flatten(image, input_size, channels=1): + #!not tested + + return tf.reshape(image, (input_size[0] * input_size[1], channels)) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index b98b66f..ada83f6 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -1,9 +1,9 @@ import os import numpy as np from keras.preprocessing.image import ImageDataGenerator -from utilities.segmentation_utils.image_encoder import ImagePreprocessor -#! 
important: as the I have no clue how we can mount this repo as a package the import is relative to the working directory +from utilities.segmentation_utils import ImagePreprocessor +#! important: as the I have no clue how we can mount this repo as a package the import is relative to the working directory class FlowGenerator: @@ -73,7 +73,7 @@ def __make_generator(self): None """ - + image_datagen = ImageDataGenerator() mask_datagen = ImageDataGenerator() @@ -99,7 +99,7 @@ def __make_generator(self): def get_generator(self): """ - Returns the generator + Returns the generator object Parameters: ---------- @@ -114,7 +114,8 @@ def get_generator(self): def preprocess(self, generator_zip): """ - Preprocessor function to augments the images and masks and onehot encodes the masks + Preprocessor function encapsulates both the image, and mask generator objects. + Augments the images and masks and onehot encodes the masks Parameters: ---------- @@ -128,7 +129,7 @@ def preprocess(self, generator_zip): for i in range(len(img)): image_seed = np.random.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size,seed=image_seed + img[i], mask[i], self.image_size, seed=image_seed ) mask = ImagePreprocessor.onehot_encode( mask, self.image_size, self.num_classes diff --git a/utilities/segmentation_utils/image_encoder.py b/utilities/segmentation_utils/image_encoder.py deleted file mode 100644 index 7a0c388..0000000 --- a/utilities/segmentation_utils/image_encoder.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy as np -import tensorflow as tf - - -class ImagePreprocessor: - @classmethod - def onehot_encode(self, masks, image_size, num_classes): - """ - Onehot encodes the images coming from the image generator object - - Parameters: - ---------- - masks (tf tensor): masks to be onehot encoded - - Returns: - ------- - encoded (tf tensor): onehot encoded masks - """ - encoded = np.zeros( - (masks.shape[0], image_size[0] // 2 * 
image_size[1] // 2, num_classes) - ) - for i in range(num_classes): - encoded[:, :, i] = tf.squeeze((masks == i).astype(int)) - - return encoded - - @classmethod - def augmentation_pipeline(self, image, mask, input_size, channels=3,seed=0): - """ - Applies augmentation pipeline to the image and mask - - Parameters: - ---------- - image (tf tensor): image to be augmented - mask (tf tensor): mask to be augmented - input_size (tuple): size of the input image - - Returns: - ------- - image (tf tensor): augmented image - mask (tf tensor): augmented mask - """ - - input_size = (input_size[0], input_size[1], channels) - - - image = tf.image.random_flip_left_right(image, seed=seed) - image = tf.image.random_flip_up_down(image, seed=seed) - image = tf.image.random_brightness(image, 0.2, seed=seed) - image = tf.image.random_contrast(image, 0.8, 1.2, seed=seed) - image = tf.image.random_saturation(image, 0.8, 1.2, seed=seed) - image = tf.image.random_hue(image, 0.2, seed=seed) - - - mask = tf.image.random_flip_left_right(mask, seed=seed) - mask = tf.image.random_flip_up_down(mask, seed=seed) - - - return image, mask - - @classmethod - def flatten(self, image, input_size, channels=1): - return tf.reshape(image, (input_size[0] * input_size[1], channels)) - From c63f988d281946035ff150dd85fbfb2bf8458666 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Mon, 13 Mar 2023 19:02:34 +0000 Subject: [PATCH 06/59] adds tests for preprocessing queue --- .../image_preprocessor_test.py | 35 ++++++++++++- .../segmentation_utils/ImagePreprocessor.py | 49 ++++++++++++++----- 2 files changed, 72 insertions(+), 12 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 3e19040..bf0f211 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -22,4 +22,37 @@ def test_image_onehot_encoder()->None: assert 
one_hot_image.shape == (1, image_size[0]//2 * image_size[1]//2, n_classes) assert np.array_equal(one_hot_image,onehot_test) - \ No newline at end of file +def test_image_augmentation_pipeline()->None: + # predifining input variables + image = np.zeros((512,512,3)) + mask = np.zeros((256*256,1)) + input_size = (512,512) + seed = 0 + + # createing dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda x: x],arguments=dict()) + mask_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda x: x],arguments=dict()) + + image_new, mask_new = ImagePreprocessor.augmentation_pipeline(image,mask,input_size,image_queue,mask_queue) + + assert image_new.shape == (512,512,3) + assert mask_new.shape == (256*256,1) + + +def test_processing_queue()->None: + # createing dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda seed: seed],arguments=[dict(seed=1)]) + mask_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda seed: seed],arguments=[dict(seed=1)]) + + # changing the seed + new_seed = 5 + image_queue.update_seed(new_seed) + + assert image_queue.arguments[0]["seed"] == new_seed + assert image_queue.queue[0](**image_queue.arguments[0]) == new_seed + + + + + + diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index c475692..6148857 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -1,5 +1,40 @@ import numpy as np import tensorflow as tf +from dataclasses import dataclass +from typing import Callable + + +@dataclass +class PreprocessingQueue: + """ + object to initialize a preprocessing queue + + Parameters: + ---------- + queue (list): list of functions to be applied + + Returns: + ------- + None + """ + queue: list[Callable] + arguments: list[dict] + + + def update_seed(self, seed): + """ + Changes the seed of the queue + + Parameters: + ---------- + seed (int): seed to be changed to + 
+ Returns: + ------- + None + """ + for i in self.arguments: + i["seed"] = seed def onehot_encode(masks, image_size, num_classes): @@ -20,7 +55,7 @@ def onehot_encode(masks, image_size, num_classes): return encoded -def augmentation_pipeline(image, mask, input_size, channels=3, seed=0): +def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue,mask_queue:PreprocessingQueue, channels=3): """ Applies augmentation pipeline to the image and mask Parameters: @@ -33,19 +68,11 @@ def augmentation_pipeline(image, mask, input_size, channels=3, seed=0): image (tf tensor): augmented image mask (tf tensor): augmented mask """ - input_size = (input_size[0], input_size[1], channels) - image = tf.image.random_flip_left_right(image, seed=seed) - image = tf.image.random_flip_up_down(image, seed=seed) - image = tf.image.random_brightness(image, 0.2, seed=seed) - image = tf.image.random_contrast(image, 0.8, 1.2, seed=seed) - image = tf.image.random_saturation(image, 0.8, 1.2, seed=seed) - image = tf.image.random_hue(image, 0.2, seed=seed) - mask = tf.image.random_flip_left_right(mask, seed=seed) - mask = tf.image.random_flip_up_down(mask, seed=seed) + + return image, mask def flatten(image, input_size, channels=1): #!not tested - return tf.reshape(image, (input_size[0] * input_size[1], channels)) From e16aa574b0e2eef10592f527ec80cd9388331990 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Mon, 13 Mar 2023 19:04:33 +0000 Subject: [PATCH 07/59] formats testing queue --- .../image_preprocessor_test.py | 72 +++++++++++-------- 1 file changed, 42 insertions(+), 30 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index bf0f211..7fa1820 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -1,48 +1,66 @@ from utilities.segmentation_utils import ImagePreprocessor import numpy as 
np -def test_image_onehot_encoder()->None: + +def test_image_onehot_encoder() -> None: # predifining input variables - + n_classes = 2 batch_size = 1 image_size = (256, 256) - + # creating a mask with 2 classes - mask = np.zeros((batch_size,image_size[0]//2 * image_size[1]//2)) - mask[:,::2] = 1 + mask = np.zeros((batch_size, image_size[0] // 2 * image_size[1] // 2)) + mask[:, ::2] = 1 # creating a onehot mask to compare with the output of the function - onehot_test = np.zeros((batch_size,image_size[0]//2 * image_size[1]//2,n_classes)) - onehot_test[:,::2,1] = 1 - onehot_test[:,1::2,0] = 1 - - one_hot_image = ImagePreprocessor.onehot_encode(mask,image_size,n_classes) + onehot_test = np.zeros( + (batch_size, image_size[0] // 2 * image_size[1] // 2, n_classes) + ) + onehot_test[:, ::2, 1] = 1 + onehot_test[:, 1::2, 0] = 1 - assert one_hot_image.shape == (1, image_size[0]//2 * image_size[1]//2, n_classes) - assert np.array_equal(one_hot_image,onehot_test) + one_hot_image = ImagePreprocessor.onehot_encode(mask, image_size, n_classes) -def test_image_augmentation_pipeline()->None: + assert one_hot_image.shape == ( + 1, + image_size[0] // 2 * image_size[1] // 2, + n_classes, + ) + assert np.array_equal(one_hot_image, onehot_test) + + +def test_image_augmentation_pipeline() -> None: # predifining input variables - image = np.zeros((512,512,3)) - mask = np.zeros((256*256,1)) - input_size = (512,512) + image = np.zeros((512, 512, 3)) + mask = np.zeros((256 * 256, 1)) + input_size = (512, 512) seed = 0 # createing dummy queues - image_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda x: x],arguments=dict()) - mask_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda x: x],arguments=dict()) + image_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x: x], arguments=dict() + ) + mask_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x: x], arguments=dict() + ) - image_new, mask_new = 
ImagePreprocessor.augmentation_pipeline(image,mask,input_size,image_queue,mask_queue) + image_new, mask_new = ImagePreprocessor.augmentation_pipeline( + image, mask, input_size, image_queue, mask_queue + ) - assert image_new.shape == (512,512,3) - assert mask_new.shape == (256*256,1) + assert image_new.shape == (512, 512, 3) + assert mask_new.shape == (256 * 256, 1) -def test_processing_queue()->None: +def test_processing_queue() -> None: # createing dummy queues - image_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda seed: seed],arguments=[dict(seed=1)]) - mask_queue = ImagePreprocessor.PreprocessingQueue(queue=[lambda seed: seed],arguments=[dict(seed=1)]) + image_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda seed: seed], arguments=[dict(seed=1)] + ) + mask_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda seed: seed], arguments=[dict(seed=1)] + ) # changing the seed new_seed = 5 @@ -50,9 +68,3 @@ def test_processing_queue()->None: assert image_queue.arguments[0]["seed"] == new_seed assert image_queue.queue[0](**image_queue.arguments[0]) == new_seed - - - - - - From 24ecb3cc4ceac6e319d35ce279413e62a57ed5e8 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 08:57:16 +0000 Subject: [PATCH 08/59] adds test for main flowreader --- .../flow_reader_test.py | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 4a59450..e64379d 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -1,6 +1,37 @@ import os from utilities.segmentation_utils.flowreader import FlowGenerator +from keras.preprocessing.image import ImageDataGenerator from pytest import MonkeyPatch +import numpy as np + +def flow_from_directory_mock(*args, **kwargs): + channels = 3 + if "color_mode" in kwargs and kwargs["color_mode"] == 
"grayscale": + channels = 1 + + batch = np.zeros((2, kwargs["target_size"][0],kwargs["target_size"][1], channels)) + return batch + + def test_flow_generator() -> None: - pass \ No newline at end of file + patch = MonkeyPatch() + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + # mock an imagedatagenerator from keras + + + # create a flow generator + flow_generator = FlowGenerator( + image_path="tests/segmentation_utils_tests/flow_reader_test", + mask_path="tests/segmentation_utils_tests/flow_reader_test", + image_size=(512, 512), + num_classes=7, + shuffle=True, + batch_size=2, + seed=909, + ) From a2086ec2c85c561a8cc9fcfd926974f18bd82097 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 08:59:54 +0000 Subject: [PATCH 09/59] remove dependency of main flowreader test on preprocess function --- tests/segmentation_utils_tests.py/flow_reader_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index e64379d..fd08195 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -22,6 +22,7 @@ def test_flow_generator() -> None: "flow_from_directory", flow_from_directory_mock, ) + patch.setattr(FlowGenerator, "preprocess", lambda self,x: x) # mock an imagedatagenerator from keras From 87a3db7e8943d529d0c5759cb456d72028d42cd9 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 09:11:01 +0000 Subject: [PATCH 10/59] adds test for preprocessing function in flowgenerator --- .../flow_reader_test.py | 38 ++++++++++++++++--- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index fd08195..8a8932d 100644 --- 
a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -1,20 +1,21 @@ import os from utilities.segmentation_utils.flowreader import FlowGenerator from keras.preprocessing.image import ImageDataGenerator +from utilities.segmentation_utils import ImagePreprocessor from pytest import MonkeyPatch import numpy as np + def flow_from_directory_mock(*args, **kwargs): channels = 3 if "color_mode" in kwargs and kwargs["color_mode"] == "grayscale": channels = 1 - batch = np.zeros((2, kwargs["target_size"][0],kwargs["target_size"][1], channels)) + batch = np.zeros((2, kwargs["target_size"][0], kwargs["target_size"][1], channels)) return batch - -def test_flow_generator() -> None: +def test_makes_flow_generator() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras mock_image_datagen = patch.setattr( @@ -22,10 +23,32 @@ def test_flow_generator() -> None: "flow_from_directory", flow_from_directory_mock, ) - patch.setattr(FlowGenerator, "preprocess", lambda self,x: x) - # mock an imagedatagenerator from keras + patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) + + # create a flow generator + flow_generator = FlowGenerator( + image_path="tests/segmentation_utils_tests/flow_reader_test", + mask_path="tests/segmentation_utils_tests/flow_reader_test", + image_size=(512, 512), + num_classes=7, + shuffle=True, + batch_size=2, + seed=909, + ) + pass +def test_flow_generator_with_preprocess() -> None: + patch = MonkeyPatch() + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + # mock external dependencies + patch.setattr(ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y)) + patch.setattr(ImagePreprocessor, "onehot_encode", lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]),0,3)) # create a flow generator flow_generator = FlowGenerator( 
image_path="tests/segmentation_utils_tests/flow_reader_test", @@ -36,3 +59,8 @@ def test_flow_generator() -> None: batch_size=2, seed=909, ) + patch.undo() + patch.undo() + + + From 3d2a4d796d2c64dedddac12ca48655817e3d6b96 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 09:12:50 +0000 Subject: [PATCH 11/59] add todo to flow reader test --- tests/segmentation_utils_tests.py/flow_reader_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 8a8932d..5c572a1 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -62,5 +62,7 @@ def test_flow_generator_with_preprocess() -> None: patch.undo() patch.undo() - + +# TODO: test get dataset size function +# TODO: test get generator function From 4411ea25207f267815f6de1f2bcdbc15b7e419d8 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 12:38:44 +0000 Subject: [PATCH 12/59] adds tests for the processing pipeline, adds functionality for the augmentation pipeline to run transforms sequentially --- .../image_preprocessor_test.py | 19 +++- .../segmentation_utils/ImagePreprocessor.py | 93 ++++++++++++++++++- 2 files changed, 104 insertions(+), 8 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 7fa1820..3ad59b5 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -39,10 +39,10 @@ def test_image_augmentation_pipeline() -> None: # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda x: x], arguments=dict() + queue=[lambda x,y,seed: x], arguments=[{"y": 1}] ) mask_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda x: x], arguments=dict() + 
queue=[lambda x,y,seed: x], arguments=[{"y": 1}] ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( @@ -67,4 +67,17 @@ def test_processing_queue() -> None: image_queue.update_seed(new_seed) assert image_queue.arguments[0]["seed"] == new_seed - assert image_queue.queue[0](**image_queue.arguments[0]) == new_seed + + +def test_generate_default_queue() -> None: + # createing default queues + image_queue, mask_queue = ImagePreprocessor.generate_default_queue() + + # changing the seed + new_seed = 5 + image_queue.update_seed(new_seed) + + assert image_queue.arguments[0]["seed"] == new_seed + + assert image_queue.get_queue_length() == 6 + assert mask_queue.get_queue_length() == 2 diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 6148857..172d51c 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf from dataclasses import dataclass -from typing import Callable +from typing import Callable,Dict @dataclass @@ -18,8 +18,7 @@ class PreprocessingQueue: None """ queue: list[Callable] - arguments: list[dict] - + arguments: list[Dict] def update_seed(self, seed): """ @@ -36,6 +35,65 @@ def update_seed(self, seed): for i in self.arguments: i["seed"] = seed + def get_queue_length(self): + """ + Returns the length of the queue + + Parameters: + ---------- + None + + Returns: + ------- + int: length of the queue + """ + return len(self.queue) + + + + +def generate_default_queue(seed = 0): + """ + Generates the default processing queue + + Parameters: + ---------- + None + + Returns: + ------- + PreprocessingQueue: default queue + """ + image_queue = PreprocessingQueue( + queue=[ + tf.image.random_flip_left_right, + tf.image.random_flip_up_down, + tf.image.random_brightness, + tf.image.random_contrast, + tf.image.random_saturation, + tf.image.random_hue, + ], + arguments=[ + 
{"seed": seed}, + {"seed": seed}, + {"max_delta": 0.2, "seed": seed}, + {"lower": 0.8, "upper": 1.2, "seed": seed}, + {"lower": 0.8, "upper": 1.2, "seed": seed}, + {"max_delta": 0.2, "seed": seed}, + ], + ) + mask_queue = PreprocessingQueue( + queue=[ + tf.image.random_flip_left_right, + tf.image.random_flip_up_down, + ], + arguments=[ + {"seed": seed}, + {"seed": seed}, + ], + ) + return image_queue,mask_queue + def onehot_encode(masks, image_size, num_classes): """ @@ -55,20 +113,45 @@ def onehot_encode(masks, image_size, num_classes): return encoded -def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue,mask_queue:PreprocessingQueue, channels=3): +def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue = None,mask_queue:PreprocessingQueue = None, channels=3,seed = 0): """ Applies augmentation pipeline to the image and mask + If no queue is passed a default processing queue is created + Parameters: ---------- image (tf tensor): image to be augmented mask (tf tensor): mask to be augmented input_size (tuple): size of the input image + + Keyword Arguments: + ----------------- + image_queue (PreprocessingQueue): queue of image processing functions + mask_queue (PreprocessingQueue): queue of mask processing functions + channels (int): number of channels in the image + + Raises: + ------ + ValueError: if only one queue is passed + Returns: ------- image (tf tensor): augmented image mask (tf tensor): augmented mask """ - + image_queue.update_seed(seed) + mask_queue.update_seed(seed) + + if image_queue == None and mask_queue == None: + image_queue, mask_queue = generate_default_queue() + elif image_queue == None or mask_queue == None: + raise ValueError("Both queues must be passed or none") + + for i,fun in enumerate(image_queue.queue): + image = fun(image, **image_queue.arguments[i]) + + for i,fun in enumerate(mask_queue.queue): + mask = fun(mask, **mask_queue.arguments[i]) return image, mask From 
6d5941d3daff60c83cb434414f6412de8e766a25 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 12:50:02 +0000 Subject: [PATCH 13/59] updates preprocessing pipeline to reshape images in the pipeline for appropriate processing --- .../image_preprocessor_test.py | 8 +++++- .../segmentation_utils/ImagePreprocessor.py | 27 ++++++++++++++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 3ad59b5..fcb66a2 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -35,6 +35,7 @@ def test_image_augmentation_pipeline() -> None: image = np.zeros((512, 512, 3)) mask = np.zeros((256 * 256, 1)) input_size = (512, 512) + output_size = (256, 256) seed = 0 # createing dummy queues @@ -46,7 +47,7 @@ def test_image_augmentation_pipeline() -> None: ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, image_queue, mask_queue + image, mask, input_size,output_size, image_queue, mask_queue ) assert image_new.shape == (512, 512, 3) @@ -81,3 +82,8 @@ def test_generate_default_queue() -> None: assert image_queue.get_queue_length() == 6 assert mask_queue.get_queue_length() == 2 + +def test_flatten()->None: + image = np.zeros((512,512,3)) + image = ImagePreprocessor.flatten(image,(512,512),3) + assert image.shape == (512*512,3) \ No newline at end of file diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 172d51c..07d66bc 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -113,7 +113,7 @@ def onehot_encode(masks, image_size, num_classes): return encoded -def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue = None,mask_queue:PreprocessingQueue = 
None, channels=3,seed = 0): +def augmentation_pipeline(image, mask, input_size,output_size,image_queue:PreprocessingQueue = None,mask_queue:PreprocessingQueue = None, channels=3,seed = 0): """ Applies augmentation pipeline to the image and mask If no queue is passed a default processing queue is created @@ -123,6 +123,7 @@ def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue image (tf tensor): image to be augmented mask (tf tensor): mask to be augmented input_size (tuple): size of the input image + output_size (tuple): size of the output image Keyword Arguments: ----------------- @@ -141,7 +142,10 @@ def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue """ image_queue.update_seed(seed) mask_queue.update_seed(seed) - + + #reshapes masks, such that transforamtions work properly + mask = tf.reshape(mask, output_size) + if image_queue == None and mask_queue == None: image_queue, mask_queue = generate_default_queue() elif image_queue == None or mask_queue == None: @@ -153,9 +157,26 @@ def augmentation_pipeline(image, mask, input_size,image_queue:PreprocessingQueue for i,fun in enumerate(mask_queue.queue): mask = fun(mask, **mask_queue.arguments[i]) + #flattens masks out to the correct output shape + mask = flatten(mask, output_size, channels=1) return image, mask def flatten(image, input_size, channels=1): - #!not tested + """ + Flattens an input image, with reserving the channels + + Parameters: + ---------- + image (tf tensor): image to be flattened + input_size (tuple): size of the input image + + Keyword Arguments: + ----------------- + channels (int): number of channels in the image + + Returns: + ------- + image (tf tensor): flattened image + """ return tf.reshape(image, (input_size[0] * input_size[1], channels)) From 7845371fbdbf5f70622ac1f8cd8713e47f69b65d Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Tue, 14 Mar 2023 13:07:38 +0000 Subject: [PATCH 14/59] adds tests for flow generator --- 
.../flow_reader_test.py | 68 +++++++++++++++++-- .../image_preprocessor_test.py | 15 ++-- 2 files changed, 72 insertions(+), 11 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 5c572a1..29239ee 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -47,8 +47,14 @@ def test_flow_generator_with_preprocess() -> None: flow_from_directory_mock, ) # mock external dependencies - patch.setattr(ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y)) - patch.setattr(ImagePreprocessor, "onehot_encode", lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]),0,3)) + patch.setattr( + ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) + ) + patch.setattr( + ImagePreprocessor, + "onehot_encode", + lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + ) # create a flow generator flow_generator = FlowGenerator( image_path="tests/segmentation_utils_tests/flow_reader_test", @@ -63,6 +69,60 @@ def test_flow_generator_with_preprocess() -> None: patch.undo() -# TODO: test get dataset size function -# TODO: test get generator function +def test_get_dataset_size() -> None: + patch = MonkeyPatch() + patch.setattr(os, "listdir", lambda x: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + # mock external dependencies + patch.setattr( + ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) + ) + patch.setattr( + ImagePreprocessor, + "onehot_encode", + lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + ) + # create a flow generator + flow_generator = FlowGenerator( + image_path="tests/segmentation_utils_tests/flow_reader_test", + mask_path="tests/segmentation_utils_tests/flow_reader_test", + 
image_size=(512, 512), + num_classes=7, + shuffle=True, + batch_size=2, + ) + size = flow_generator.get_dataset_size() + assert size == 10 + patch.undo() + patch.undo() + patch.undo() +def test_get_generator() -> None: + patch = MonkeyPatch() + # mock external dependencies + patch.setattr( + ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) + ) + patch.setattr( + ImagePreprocessor, + "onehot_encode", + lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + ) + # create a flow generator + flow_generator = FlowGenerator( + image_path="tests/segmentation_utils_tests/flow_reader_test", + mask_path="tests/segmentation_utils_tests/flow_reader_test", + image_size=(512, 512), + num_classes=7, + shuffle=True, + batch_size=2, + ) + generator = flow_generator.get_generator() + assert generator + patch.undo() + patch.undo() \ No newline at end of file diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index fcb66a2..6b1946f 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -40,14 +40,14 @@ def test_image_augmentation_pipeline() -> None: # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda x,y,seed: x], arguments=[{"y": 1}] + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] ) mask_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda x,y,seed: x], arguments=[{"y": 1}] + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size,output_size, image_queue, mask_queue + image, mask, input_size, output_size, image_queue, mask_queue ) assert image_new.shape == (512, 512, 3) @@ -83,7 +83,8 @@ def test_generate_default_queue() -> None: assert image_queue.get_queue_length() == 6 assert mask_queue.get_queue_length() == 2 -def 
test_flatten()->None: - image = np.zeros((512,512,3)) - image = ImagePreprocessor.flatten(image,(512,512),3) - assert image.shape == (512*512,3) \ No newline at end of file + +def test_flatten() -> None: + image = np.zeros((512, 512, 3)) + image = ImagePreprocessor.flatten(image, (512, 512), 3) + assert image.shape == (512 * 512, 3) From b6e7ce98abc540b5b1266e2c122cfc71e18c7ec8 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 14:19:54 +0000 Subject: [PATCH 15/59] updates flowreader, and imagepreprocessor to accept both column tensor and matrix tensor. Updates tests accordingly --- .../flow_reader_test.py | 4 ++ .../image_preprocessor_test.py | 38 +++++++++++--- .../segmentation_utils/ImagePreprocessor.py | 49 +++++++++++++------ utilities/segmentation_utils/flowreader.py | 16 ++++-- 4 files changed, 83 insertions(+), 24 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 29239ee..e239f14 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -30,6 +30,7 @@ def test_makes_flow_generator() -> None: image_path="tests/segmentation_utils_tests/flow_reader_test", mask_path="tests/segmentation_utils_tests/flow_reader_test", image_size=(512, 512), + output_size=(256*256,1), num_classes=7, shuffle=True, batch_size=2, @@ -60,6 +61,7 @@ def test_flow_generator_with_preprocess() -> None: image_path="tests/segmentation_utils_tests/flow_reader_test", mask_path="tests/segmentation_utils_tests/flow_reader_test", image_size=(512, 512), + output_size=(256*256,1), num_classes=7, shuffle=True, batch_size=2, @@ -92,6 +94,7 @@ def test_get_dataset_size() -> None: image_path="tests/segmentation_utils_tests/flow_reader_test", mask_path="tests/segmentation_utils_tests/flow_reader_test", image_size=(512, 512), + output_size=(256*256,1), num_classes=7, shuffle=True, batch_size=2, @@ -118,6 +121,7 @@ def 
test_get_generator() -> None: image_path="tests/segmentation_utils_tests/flow_reader_test", mask_path="tests/segmentation_utils_tests/flow_reader_test", image_size=(512, 512), + output_size=(256*256,1), num_classes=7, shuffle=True, batch_size=2, diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 6b1946f..6142c57 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -7,20 +7,22 @@ def test_image_onehot_encoder() -> None: n_classes = 2 batch_size = 1 - image_size = (256, 256) + image_size = (512, 512) + output_size = (256,256) + # creating a mask with 2 classes - mask = np.zeros((batch_size, image_size[0] // 2 * image_size[1] // 2)) + mask = np.zeros((batch_size, output_size[0] * output_size[1])) mask[:, ::2] = 1 # creating a onehot mask to compare with the output of the function onehot_test = np.zeros( - (batch_size, image_size[0] // 2 * image_size[1] // 2, n_classes) + (batch_size, output_size[0] * output_size[1], n_classes) ) onehot_test[:, ::2, 1] = 1 onehot_test[:, 1::2, 0] = 1 - one_hot_image = ImagePreprocessor.onehot_encode(mask, image_size, n_classes) + one_hot_image = ImagePreprocessor.onehot_encode(mask, output_size, n_classes) assert one_hot_image.shape == ( 1, @@ -30,12 +32,12 @@ def test_image_onehot_encoder() -> None: assert np.array_equal(one_hot_image, onehot_test) -def test_image_augmentation_pipeline() -> None: +def test_image_augmentation_pipeline_column() -> None: # predifining input variables image = np.zeros((512, 512, 3)) mask = np.zeros((256 * 256, 1)) input_size = (512, 512) - output_size = (256, 256) + output_size = (256 * 256, 1) seed = 0 # createing dummy queues @@ -54,6 +56,30 @@ def test_image_augmentation_pipeline() -> None: assert mask_new.shape == (256 * 256, 1) +def test_image_augmentation_pipeline_squarematrix() -> None: + # predifining input variables 
+ image = np.zeros((512, 512, 3)) + mask = np.zeros((256, 256, 1)) + input_size = (512, 512) + output_size = (256, 256) + seed = 0 + + # createing dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] + ) + mask_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] + ) + + image_new, mask_new = ImagePreprocessor.augmentation_pipeline( + image, mask, input_size, output_size, image_queue, mask_queue + ) + + assert image_new.shape == (512, 512, 3) + assert mask_new.shape == (256, 256, 1) + + def test_processing_queue() -> None: # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 07d66bc..6765d5f 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf from dataclasses import dataclass -from typing import Callable,Dict +from typing import Callable, Dict @dataclass @@ -17,6 +17,7 @@ class PreprocessingQueue: ------- None """ + queue: list[Callable] arguments: list[Dict] @@ -48,11 +49,9 @@ def get_queue_length(self): int: length of the queue """ return len(self.queue) - - -def generate_default_queue(seed = 0): +def generate_default_queue(seed=0): """ Generates the default processing queue @@ -60,6 +59,10 @@ def generate_default_queue(seed = 0): ---------- None + Keyword Arguments: + ----------------- + seed (int): seed to be used for the random functions + Returns: ------- PreprocessingQueue: default queue @@ -92,28 +95,40 @@ def generate_default_queue(seed = 0): {"seed": seed}, ], ) - return image_queue,mask_queue + return image_queue, mask_queue -def onehot_encode(masks, image_size, num_classes): +def onehot_encode(masks, output_size, num_classes): """ Onehot encodes the images coming from the 
image generator object Parameters: ---------- masks (tf tensor): masks to be onehot encoded + output_size (tuple): size of the output image, it is specified as (height, width) #!Note that for a column vector the width is 1 + num_classes (int): number of classes in the mask, to be onehot encoded + Returns: ------- encoded (tf tensor): onehot encoded masks """ encoded = np.zeros( - (masks.shape[0], image_size[0] // 2 * image_size[1] // 2, num_classes) + (masks.shape[0], output_size[0] * output_size[1] , num_classes) ) for i in range(num_classes): encoded[:, :, i] = tf.squeeze((masks == i).astype(int)) return encoded -def augmentation_pipeline(image, mask, input_size,output_size,image_queue:PreprocessingQueue = None,mask_queue:PreprocessingQueue = None, channels=3,seed = 0): +def augmentation_pipeline( + image, + mask, + input_size, + output_size, + image_queue: PreprocessingQueue = None, + mask_queue: PreprocessingQueue = None, + channels=3, + seed=0, +): """ Applies augmentation pipeline to the image and mask If no queue is passed a default processing queue is created @@ -130,6 +145,7 @@ def augmentation_pipeline(image, mask, input_size,output_size,image_queue:Prepro image_queue (PreprocessingQueue): queue of image processing functions mask_queue (PreprocessingQueue): queue of mask processing functions channels (int): number of channels in the image + seed (int): seed to be used for the random functions Raises: ------ @@ -143,22 +159,25 @@ def augmentation_pipeline(image, mask, input_size,output_size,image_queue:Prepro image_queue.update_seed(seed) mask_queue.update_seed(seed) - #reshapes masks, such that transforamtions work properly - mask = tf.reshape(mask, output_size) + + # reshapes masks, such that transforamtions work properly + if output_size[1] == 1: + mask = tf.reshape(mask, output_size) if image_queue == None and mask_queue == None: image_queue, mask_queue = generate_default_queue() elif image_queue == None or mask_queue == None: raise ValueError("Both 
queues must be passed or none") - for i,fun in enumerate(image_queue.queue): + for i, fun in enumerate(image_queue.queue): image = fun(image, **image_queue.arguments[i]) - for i,fun in enumerate(mask_queue.queue): + for i, fun in enumerate(mask_queue.queue): mask = fun(mask, **mask_queue.arguments[i]) - #flattens masks out to the correct output shape - mask = flatten(mask, output_size, channels=1) + # flattens masks out to the correct output shape + if output_size[1] == 1: + mask = flatten(mask, output_size, channels=1) return image, mask @@ -170,7 +189,7 @@ def flatten(image, input_size, channels=1): ---------- image (tf tensor): image to be flattened input_size (tuple): size of the input image - + Keyword Arguments: ----------------- channels (int): number of channels in the image diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index ada83f6..47b24fe 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -12,6 +12,7 @@ def __init__( image_path, mask_path, image_size, + output_size, num_classes, shuffle=True, batch_size=32, @@ -26,6 +27,9 @@ def __init__( mask (string): path to the mask directory batch_size (int): batch size image_size (tuple): image size + output_size (tuple): output size #! 
Note: in case the output is a column vector it has to be in the shape (x, 1) + #TODO: check if parameter format is correct + num_classes (int): number of classes shuffle (bool): whether to shuffle the dataset or not @@ -34,10 +38,16 @@ def __init__( None """ + if len(output_size)!=2: + raise ValueError("The output size has to be a tuple of length 2") + elif output_size[1] != 1 and output_size[0] != output_size[1]: + raise ValueError("The output size has to be a square matrix or a column vector") + self.image_path = image_path self.mask_path = mask_path self.batch_size = batch_size self.image_size = image_size + self.output_size = output_size self.num_classes = num_classes self.shuffle = shuffle self.seed = seed @@ -90,7 +100,7 @@ def __make_generator(self): class_mode=None, seed=self.seed, batch_size=self.batch_size, - target_size=(self.image_size[0] // 2 * self.image_size[1] // 2, 1), + target_size=self.output_size, color_mode="grayscale", ) @@ -129,9 +139,9 @@ def preprocess(self, generator_zip): for i in range(len(img)): image_seed = np.random.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size, seed=image_seed + img[i], mask[i], self.image_size,self.output_size, seed=image_seed ) mask = ImagePreprocessor.onehot_encode( - mask, self.image_size, self.num_classes + mask, self.output_size, self.num_classes ) yield (img, mask) From c4dc9dd3cf83c1ee1800dd208da4891ca6aa957c Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 14:58:06 +0000 Subject: [PATCH 16/59] abstract lambda mock functions and generator arguments to variable for reusability --- .../flow_reader_test.py | 81 +++++++------------ 1 file changed, 29 insertions(+), 52 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index e239f14..aec09eb 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ 
b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -15,6 +15,21 @@ def flow_from_directory_mock(*args, **kwargs): return batch +generator_args = { + "image_path": "tests/segmentation_utils_tests/flow_reader_test", + "mask_path": "tests/segmentation_utils_tests/flow_reader_test", + "image_size": (512, 512), + "output_size": (256 * 256, 1), + "num_classes": 7, + "shuffle": True, + "batch_size": 2, + "seed": 909, +} + +mock_onehot_fn = lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3) +mock_augmentation_fn = lambda x, y, z, a, b: (x, y) + + def test_makes_flow_generator() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras @@ -26,16 +41,7 @@ def test_makes_flow_generator() -> None: patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) # create a flow generator - flow_generator = FlowGenerator( - image_path="tests/segmentation_utils_tests/flow_reader_test", - mask_path="tests/segmentation_utils_tests/flow_reader_test", - image_size=(512, 512), - output_size=(256*256,1), - num_classes=7, - shuffle=True, - batch_size=2, - seed=909, - ) + flow_generator = FlowGenerator(**generator_args) pass @@ -48,25 +54,15 @@ def test_flow_generator_with_preprocess() -> None: flow_from_directory_mock, ) # mock external dependencies - patch.setattr( - ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) - ) + patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( ImagePreprocessor, "onehot_encode", - lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + mock_onehot_fn, ) + # create a flow generator - flow_generator = FlowGenerator( - image_path="tests/segmentation_utils_tests/flow_reader_test", - mask_path="tests/segmentation_utils_tests/flow_reader_test", - image_size=(512, 512), - output_size=(256*256,1), - num_classes=7, - shuffle=True, - batch_size=2, - seed=909, - ) + flow_generator = FlowGenerator(**generator_args) patch.undo() patch.undo() @@ -74,59 
+70,40 @@ def test_flow_generator_with_preprocess() -> None: def test_get_dataset_size() -> None: patch = MonkeyPatch() patch.setattr(os, "listdir", lambda x: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) - # mock an imagedatagenerator from keras + # mock an imagedatagenerator from keras mock_image_datagen = patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, ) # mock external dependencies - patch.setattr( - ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) - ) + patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( ImagePreprocessor, "onehot_encode", - lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + mock_onehot_fn, ) # create a flow generator - flow_generator = FlowGenerator( - image_path="tests/segmentation_utils_tests/flow_reader_test", - mask_path="tests/segmentation_utils_tests/flow_reader_test", - image_size=(512, 512), - output_size=(256*256,1), - num_classes=7, - shuffle=True, - batch_size=2, - ) + flow_generator = FlowGenerator(**generator_args) size = flow_generator.get_dataset_size() assert size == 10 patch.undo() patch.undo() patch.undo() + def test_get_generator() -> None: patch = MonkeyPatch() # mock external dependencies - patch.setattr( - ImagePreprocessor, "augmentation_pipeline", lambda x, y, z, a, b: (x, y) - ) + patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( ImagePreprocessor, "onehot_encode", - lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3), + mock_onehot_fn, ) # create a flow generator - flow_generator = FlowGenerator( - image_path="tests/segmentation_utils_tests/flow_reader_test", - mask_path="tests/segmentation_utils_tests/flow_reader_test", - image_size=(512, 512), - output_size=(256*256,1), - num_classes=7, - shuffle=True, - batch_size=2, - ) + flow_generator = FlowGenerator(**generator_args) generator = flow_generator.get_generator() assert generator patch.undo() - 
patch.undo() \ No newline at end of file + patch.undo() From 226876f0f17ec2976c17abfddc1aa798d1f9114d Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 15:00:49 +0000 Subject: [PATCH 17/59] update condition for get_generator test, adds extra comments --- tests/segmentation_utils_tests.py/flow_reader_test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index aec09eb..cd80776 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -5,7 +5,7 @@ from pytest import MonkeyPatch import numpy as np - +# mock implementations def flow_from_directory_mock(*args, **kwargs): channels = 3 if "color_mode" in kwargs and kwargs["color_mode"] == "grayscale": @@ -29,7 +29,7 @@ def flow_from_directory_mock(*args, **kwargs): mock_onehot_fn = lambda x, y, z: np.rollaxis(np.array([x for i in range(z)]), 0, 3) mock_augmentation_fn = lambda x, y, z, a, b: (x, y) - +# tests def test_makes_flow_generator() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras @@ -53,6 +53,7 @@ def test_flow_generator_with_preprocess() -> None: "flow_from_directory", flow_from_directory_mock, ) + # mock external dependencies patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( @@ -94,6 +95,7 @@ def test_get_dataset_size() -> None: def test_get_generator() -> None: patch = MonkeyPatch() + # mock external dependencies patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( @@ -101,9 +103,11 @@ def test_get_generator() -> None: "onehot_encode", mock_onehot_fn, ) + # create a flow generator flow_generator = FlowGenerator(**generator_args) generator = flow_generator.get_generator() - assert generator + + assert generator != None patch.undo() patch.undo() From 
09ea838931b511ac04af75a2b148e884d84ff88a Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 16:46:18 +0000 Subject: [PATCH 18/59] add tests for imagepreprocessor to test if no default queue is provided, update imagepreprocessor to pass --- .../image_preprocessor_test.py | 19 +++++++++++++++++++ .../segmentation_utils/ImagePreprocessor.py | 10 ++++++---- utilities/segmentation_utils/flowreader.py | 7 +++++-- 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 6142c57..5e28247 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -56,6 +56,22 @@ def test_image_augmentation_pipeline_column() -> None: assert mask_new.shape == (256 * 256, 1) +def test_image_augmentation_pipeline_no_queue() -> None: + # predifining input variables + image = np.zeros((512, 512, 3)) + mask = np.zeros((256, 256, 1)) + input_size = (512, 512) + output_size = (256, 256) + seed = 0 + + image_new, mask_new = ImagePreprocessor.augmentation_pipeline( + image, mask, input_size, output_size + ) + + assert image_new.shape == (512, 512, 3) + assert mask_new.shape == (256, 256, 1) + + def test_image_augmentation_pipeline_squarematrix() -> None: # predifining input variables image = np.zeros((512, 512, 3)) @@ -114,3 +130,6 @@ def test_flatten() -> None: image = np.zeros((512, 512, 3)) image = ImagePreprocessor.flatten(image, (512, 512), 3) assert image.shape == (512 * 512, 3) + + +#TODO: add tests for checking if errors are raised when the input is not correct \ No newline at end of file diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 6765d5f..31fea0c 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -156,19 +156,21 
@@ def augmentation_pipeline( image (tf tensor): augmented image mask (tf tensor): augmented mask """ - image_queue.update_seed(seed) - mask_queue.update_seed(seed) - # reshapes masks, such that transforamtions work properly if output_size[1] == 1: mask = tf.reshape(mask, output_size) if image_queue == None and mask_queue == None: + #!Possibly in the wrong place as it has to be regenerated every time image_queue, mask_queue = generate_default_queue() + print("No queue passed, using default queue") + elif image_queue == None or mask_queue == None: raise ValueError("Both queues must be passed or none") - + + image_queue.update_seed(seed) + mask_queue.update_seed(seed) for i, fun in enumerate(image_queue.queue): image = fun(image, **image_queue.arguments[i]) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 47b24fe..fe85a39 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -15,7 +15,7 @@ def __init__( output_size, num_classes, shuffle=True, - batch_size=32, + batch_size=2, seed=909, ): """ @@ -38,6 +38,7 @@ def __init__( None """ + #TODO: needs testing if len(output_size)!=2: raise ValueError("The output size has to be a tuple of length 2") elif output_size[1] != 1 and output_size[0] != output_size[1]: @@ -54,6 +55,8 @@ def __init__( self.__make_generator() print("Reading images from: ", self.image_path) + + def get_dataset_size(self): """ Returns the length of the dataset @@ -133,7 +136,7 @@ def preprocess(self, generator_zip): Returns: ------- - generator: generator batch + a batch (tuple): generator batch of image and mask """ for (img, mask) in generator_zip: for i in range(len(img)): From 45f75c58cb60ad02c46bc62099c12c830a88af1a Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 17:26:09 +0000 Subject: [PATCH 19/59] adds new test to image_preprocessor, adds requirements --- requirements.txt | 2 ++ .../image_preprocessor_test.py | 22 
+++++++++++++++++++ utilities/segmentation_utils/flowreader.py | 1 + 3 files changed, 25 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..a5d8dbe --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +tensorflow==2.10 +numpy==1.24.1 diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 5e28247..3125c9d 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -72,6 +72,27 @@ def test_image_augmentation_pipeline_no_queue() -> None: assert mask_new.shape == (256, 256, 1) +def test_image_augmentation_pipeline_error_raised()-> None: + try: + # predifining input variables + image = np.zeros((512, 512, 3)) + mask = np.zeros((256, 256, 1)) + input_size = (512, 512) + output_size = (256, 256) + seed = 0 + # createing dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] + ) + + image_new, mask_new = ImagePreprocessor.augmentation_pipeline( + image, mask, input_size, output_size, image_queue + ) + assert False + except: + assert True + + def test_image_augmentation_pipeline_squarematrix() -> None: # predifining input variables image = np.zeros((512, 512, 3)) @@ -132,4 +153,5 @@ def test_flatten() -> None: assert image.shape == (512 * 512, 3) + #TODO: add tests for checking if errors are raised when the input is not correct \ No newline at end of file diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index fe85a39..66bcb97 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -138,6 +138,7 @@ def preprocess(self, generator_zip): ------- a batch (tuple): generator batch of image and mask """ + #TODO: random factor seed for (img, mask) in generator_zip: 
for i in range(len(img)): image_seed = np.random.randint(0, 100000) From 3724ed954d142bbb8f3c344e7b740fb42f6d94b4 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 20:04:53 +0000 Subject: [PATCH 20/59] adds valueerror testing for flowreader output values --- .../flow_reader_test.py | 42 ++++++++++++++++++- .../image_preprocessor_test.py | 2 +- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index cd80776..25efc4e 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -45,6 +45,46 @@ def test_makes_flow_generator() -> None: pass +def test_makes_flow_generator_wrong_shape() -> None: + try: + patch = MonkeyPatch() + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) + + fail_generator = generator_args.copy() + # create a flow generator + fail_generator["output_size"] = (256, 256, 256) + flow_generator = FlowGenerator(**fail_generator) + assert False + except ValueError: + assert True + + +def test_makes_flow_generator_wrong_dimension() -> None: + try: + patch = MonkeyPatch() + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) + + fail_generator = generator_args.copy() + # create a flow generator + fail_generator["output_size"] = (256 * 256,2) + flow_generator = FlowGenerator(**fail_generator) + assert False + except ValueError: + assert True + + def test_flow_generator_with_preprocess() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras @@ -53,7 +93,7 @@ def test_flow_generator_with_preprocess() -> 
None: "flow_from_directory", flow_from_directory_mock, ) - + # mock external dependencies patch.setattr(ImagePreprocessor, "augmentation_pipeline", mock_augmentation_fn) patch.setattr( diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 3125c9d..3436d7a 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -89,7 +89,7 @@ def test_image_augmentation_pipeline_error_raised()-> None: image, mask, input_size, output_size, image_queue ) assert False - except: + except ValueError: assert True From 7f23911f23f0c09a4d062b905b2e16cc0487b2c7 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 20:12:37 +0000 Subject: [PATCH 21/59] adds random state for the image augmentation pipeline --- utilities/segmentation_utils/flowreader.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 66bcb97..43b5800 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -28,10 +28,17 @@ def __init__( batch_size (int): batch size image_size (tuple): image size output_size (tuple): output size #! 
Note: in case the output is a column vector it has to be in the shape (x, 1) - #TODO: check if parameter format is correct - num_classes (int): number of classes + + #TODO: check if parameter format is correct shuffle (bool): whether to shuffle the dataset or not + batch_size (int): batch size + seed (int): seed for flow from directory + + Raises: + ------ + ValueError: if the output size is not a tuple of length 2 + ValueError: if the output size is not a square matrix or a column vector Returns: ------- @@ -125,7 +132,7 @@ def get_generator(self): """ return self.train_generator - def preprocess(self, generator_zip): + def preprocess(self, generator_zip,state = None): """ Preprocessor function encapsulates both the image, and mask generator objects. Augments the images and masks and onehot encodes the masks @@ -138,10 +145,15 @@ def preprocess(self, generator_zip): ------- a batch (tuple): generator batch of image and mask """ - #TODO: random factor seed for (img, mask) in generator_zip: for i in range(len(img)): - image_seed = np.random.randint(0, 100000) + #random state for reproducibility + if state is None: + image_seed = np.random.randint(0, 100000) + else: + state = np.random.RandomState(state) + image_seed = state.randint(0, 100000) + img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( img[i], mask[i], self.image_size,self.output_size, seed=image_seed ) From aeb4dc9a1b27dd2ad5e9e29126a8b32f2afaa90b Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 20:16:45 +0000 Subject: [PATCH 22/59] updates documentation --- .../flow_reader_test.py | 2 +- utilities/segmentation_utils/flowreader.py | 21 ++++++++++--------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 25efc4e..1b6f03b 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ 
-78,7 +78,7 @@ def test_makes_flow_generator_wrong_dimension() -> None: fail_generator = generator_args.copy() # create a flow generator - fail_generator["output_size"] = (256 * 256,2) + fail_generator["output_size"] = (256 * 256, 2) flow_generator = FlowGenerator(**fail_generator) assert False except ValueError: diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 43b5800..2d30b0a 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -19,7 +19,7 @@ def __init__( seed=909, ): """ - Initializes the flow generator object + Initializes the flow generator object, which can be used to read in images for semantic segmentation. Additionally, the reader can apply augmentation on the images, and one-hot encode them on the fly. Parameters: ---------- @@ -30,7 +30,8 @@ def __init__( output_size (tuple): output size #! Note: in case the output is a column vector it has to be in the shape (x, 1) num_classes (int): number of classes - #TODO: check if parameter format is correct + Keyword Arguments: + ----------------- shuffle (bool): whether to shuffle the dataset or not batch_size (int): batch size seed (int): seed for flow from directory @@ -45,11 +46,13 @@ def __init__( None """ - #TODO: needs testing - if len(output_size)!=2: + # TODO: needs testing + if len(output_size) != 2: raise ValueError("The output size has to be a tuple of length 2") elif output_size[1] != 1 and output_size[0] != output_size[1]: - raise ValueError("The output size has to be a square matrix or a column vector") + raise ValueError( + "The output size has to be a square matrix or a column vector" + ) self.image_path = image_path self.mask_path = mask_path @@ -62,8 +65,6 @@ def __init__( self.__make_generator() print("Reading images from: ", self.image_path) - - def get_dataset_size(self): """ Returns the length of the dataset @@ -132,7 +133,7 @@ def get_generator(self): """ return 
self.train_generator - def preprocess(self, generator_zip,state = None): + def preprocess(self, generator_zip, state=None): """ Preprocessor function encapsulates both the image, and mask generator objects. Augments the images and masks and onehot encodes the masks @@ -147,7 +148,7 @@ def preprocess(self, generator_zip,state = None): """ for (img, mask) in generator_zip: for i in range(len(img)): - #random state for reproducibility + # random state for reproducibility if state is None: image_seed = np.random.randint(0, 100000) else: @@ -155,7 +156,7 @@ def preprocess(self, generator_zip,state = None): image_seed = state.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size,self.output_size, seed=image_seed + img[i], mask[i], self.image_size, self.output_size, seed=image_seed ) mask = ImagePreprocessor.onehot_encode( mask, self.output_size, self.num_classes From 4670bf9373540c2a8ecf6549671bc4962332f0dd Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Wed, 15 Mar 2023 20:32:50 +0000 Subject: [PATCH 23/59] update gitignore and update documentation on flowreader --- .gitignore | 1 + .../flow_reader_test.py | 17 +++++++------ utilities/segmentation_utils/flowreader.py | 24 ++++++++++++++----- 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index 164719c..6671661 100644 --- a/.gitignore +++ b/.gitignore @@ -134,4 +134,5 @@ dmypy.json archive.lnk colab_notebook.ipynb +/.vscode/ diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 1b6f03b..a031da8 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -1,9 +1,12 @@ import os -from utilities.segmentation_utils.flowreader import FlowGenerator + +import numpy as np from keras.preprocessing.image import ImageDataGenerator -from utilities.segmentation_utils import ImagePreprocessor 
from pytest import MonkeyPatch -import numpy as np + +from utilities.segmentation_utils import ImagePreprocessor +from utilities.segmentation_utils.flowreader import FlowGenerator + # mock implementations def flow_from_directory_mock(*args, **kwargs): @@ -69,7 +72,7 @@ def test_makes_flow_generator_wrong_dimension() -> None: try: patch = MonkeyPatch() # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, @@ -79,7 +82,7 @@ def test_makes_flow_generator_wrong_dimension() -> None: fail_generator = generator_args.copy() # create a flow generator fail_generator["output_size"] = (256 * 256, 2) - flow_generator = FlowGenerator(**fail_generator) + FlowGenerator(**fail_generator) assert False except ValueError: assert True @@ -88,7 +91,7 @@ def test_makes_flow_generator_wrong_dimension() -> None: def test_flow_generator_with_preprocess() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, @@ -103,7 +106,7 @@ def test_flow_generator_with_preprocess() -> None: ) # create a flow generator - flow_generator = FlowGenerator(**generator_args) + FlowGenerator(**generator_args) patch.undo() patch.undo() diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 2d30b0a..c5b2511 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -1,12 +1,21 @@ +""" +FlowGenerator is a wrapper around the keras ImageDataGenerator class. +""" + import os import numpy as np from keras.preprocessing.image import ImageDataGenerator from utilities.segmentation_utils import ImagePreprocessor -#! 
important: as the I have no clue how we can mount this repo as a package the import is relative to the working directory - class FlowGenerator: + """ + FlowGenerator is a wrapper around the keras ImageDataGenerator class. + It can be used to read in images for semantic segmentation. + Additionally, the reader can apply augmentation on the images, + and one-hot encode them on the fly. + """ + def __init__( self, image_path, @@ -19,7 +28,10 @@ def __init__( seed=909, ): """ - Initializes the flow generator object, which can be used to read in images for semantic segmentation. Additionally, the reader can apply augmentation on the images, and one-hot encode them on the fly. + Initializes the flow generator object, + which can be used to read in images for semantic segmentation. + Additionally, the reader can apply augmentation on the images, + and one-hot encode them on the fly. Parameters: ---------- @@ -27,7 +39,8 @@ def __init__( mask (string): path to the mask directory batch_size (int): batch size image_size (tuple): image size - output_size (tuple): output size #! Note: in case the output is a column vector it has to be in the shape (x, 1) + output_size (tuple): output size + #! 
Note: in case the output is a column vector it has to be in the shape (x, 1) num_classes (int): number of classes Keyword Arguments: @@ -46,10 +59,9 @@ def __init__( None """ - # TODO: needs testing if len(output_size) != 2: raise ValueError("The output size has to be a tuple of length 2") - elif output_size[1] != 1 and output_size[0] != output_size[1]: + if output_size[1] != 1 and output_size[0] != output_size[1]: raise ValueError( "The output size has to be a square matrix or a column vector" ) From 8a2f96e40ef97e3e2fe0960e3537af530254a94f Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Thu, 16 Mar 2023 16:36:06 +0000 Subject: [PATCH 24/59] adds documentation files, adds rst files to gitignore --- .gitignore | 2 ++ docs/Makefile | 20 ++++++++++++++++++++ docs/conf.py | 34 ++++++++++++++++++++++++++++++++++ docs/make.bat | 35 +++++++++++++++++++++++++++++++++++ 4 files changed, 91 insertions(+) create mode 100644 docs/Makefile create mode 100644 docs/conf.py create mode 100644 docs/make.bat diff --git a/.gitignore b/.gitignore index 6671661..ef3ded4 100644 --- a/.gitignore +++ b/.gitignore @@ -136,3 +136,5 @@ archive.lnk colab_notebook.ipynb /.vscode/ +*.rst + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..644e32d --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,34 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + + +import os +import sys +sys.path.insert(0, os.path.abspath('..')) + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = 'test' +copyright = '2023, test' +author = 'test' +release = '1.0.0' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc'] + +templates_path = ['_templates'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'alabaster' +html_static_path = ['_static'] diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..32bb245 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd From b966ebebd99d963f1cba6e3946b91d2bc94702e7 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Thu, 16 Mar 2023 16:38:27 +0000 Subject: [PATCH 25/59] update documentation theme --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 644e32d..90e56b5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -30,5 +30,5 @@ # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' html_static_path = ['_static'] From fe59467f37e8a43ce8febd9afcb12a38aacdfefb Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Thu, 16 Mar 2023 17:00:36 +0000 Subject: [PATCH 26/59] updates documentation to include all packages on the index page --- .gitignore | 1 - docs/conf.py | 11 +++++--- docs/index.rst | 20 +++++++++++++++ docs/modules.rst | 8 ++++++ docs/tests.rst | 10 ++++++++ docs/utilities.rst | 37 +++++++++++++++++++++++++++ docs/utilities.segmentation_utils.rst | 29 +++++++++++++++++++++ index.rst | 20 +++++++++++++++ 8 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 docs/index.rst create mode 100644 docs/modules.rst create mode 100644 docs/tests.rst create mode 100644 docs/utilities.rst create mode 100644 docs/utilities.segmentation_utils.rst create mode 100644 index.rst diff --git a/.gitignore b/.gitignore index ef3ded4..f9b1cf3 100644 --- a/.gitignore +++ b/.gitignore @@ -136,5 +136,4 @@ archive.lnk colab_notebook.ipynb /.vscode/ -*.rst diff --git a/docs/conf.py b/docs/conf.py index 90e56b5..41fbba7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,13 
+9,16 @@ sys.path.insert(0, os.path.abspath('..')) + # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -project = 'test' -copyright = '2023, test' -author = 'test' -release = '1.0.0' + + +project = 'GU Orbit Software Utilities' +copyright = '2023, GU Orbit Software Team' +author = 'GU Orbit Software Team' +release = '0.1.0' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..930e21c --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,20 @@ +.. GU Orbit Software Utilities documentation master file, created by + sphinx-quickstart on Thu Mar 16 16:56:02 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to GU Orbit Software Utilities's documentation! +======================================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + modules + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/modules.rst b/docs/modules.rst new file mode 100644 index 0000000..d62cd7d --- /dev/null +++ b/docs/modules.rst @@ -0,0 +1,8 @@ +utilities +========= + +.. toctree:: + :maxdepth: 4 + + tests + utilities diff --git a/docs/tests.rst b/docs/tests.rst new file mode 100644 index 0000000..cdc5cc2 --- /dev/null +++ b/docs/tests.rst @@ -0,0 +1,10 @@ +tests package +============= + +Module contents +--------------- + +.. automodule:: tests + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.rst b/docs/utilities.rst new file mode 100644 index 0000000..1230201 --- /dev/null +++ b/docs/utilities.rst @@ -0,0 +1,37 @@ +utilities package +================= + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + utilities.segmentation_utils + +Submodules +---------- + +utilities.image\_cutting module +------------------------------- + +.. automodule:: utilities.image_cutting + :members: + :undoc-members: + :show-inheritance: + +utilities.image\_loading module +------------------------------- + +.. automodule:: utilities.image_loading + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: utilities + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.segmentation_utils.rst b/docs/utilities.segmentation_utils.rst new file mode 100644 index 0000000..41251d0 --- /dev/null +++ b/docs/utilities.segmentation_utils.rst @@ -0,0 +1,29 @@ +utilities.segmentation\_utils package +===================================== + +Submodules +---------- + +utilities.segmentation\_utils.ImagePreprocessor module +------------------------------------------------------ + +.. automodule:: utilities.segmentation_utils.ImagePreprocessor + :members: + :undoc-members: + :show-inheritance: + +utilities.segmentation\_utils.flowreader module +----------------------------------------------- + +.. automodule:: utilities.segmentation_utils.flowreader + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: utilities.segmentation_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/index.rst b/index.rst new file mode 100644 index 0000000..a5ee92f --- /dev/null +++ b/index.rst @@ -0,0 +1,20 @@ +.. GU Orbit Software Utilities documentation master file, created by + sphinx-quickstart on Thu Mar 16 16:55:14 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to GU Orbit Software Utilities's documentation! +======================================================= + +.. 
toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` From 660237625d9bad38bdc4eb12bb7eb1b5df3e1939 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Thu, 16 Mar 2023 17:08:42 +0000 Subject: [PATCH 27/59] updates styling of the documentation --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 41fbba7..0f77733 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -33,5 +33,5 @@ # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = 'sphinx_rtd_theme' +html_theme = 'insegel' html_static_path = ['_static'] From 4b0bdb0d6a6a97bb825e8283518bf6260f9169d2 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:00:51 +0000 Subject: [PATCH 28/59] Create .github/workflows/static.yml --- .github/workflows/static.yml | 63 ++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 .github/workflows/static.yml diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml new file mode 100644 index 0000000..0a1bc06 --- /dev/null +++ b/.github/workflows/static.yml @@ -0,0 +1,63 @@ +# Simple workflow for deploying static content to GitHub Pages +name: Deploy static content to Pages + +on: + # Runs on pushes targeting the default branch + push: + branches: ["sphinx_documentation"] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + # Single deploy job since we're just deploying + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + pip install -U sphinx + pip install insegel + - name: Build documentation + run: | + cd docs + sphinx-apidoc -o . .. + make html + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Pages + uses: actions/configure-pages@v3 + - name: Upload artifact + uses: actions/upload-pages-artifact@v1 + with: + # Upload entire repository + path: '.' 
+ - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v1 From bdd5040d1997a7cfd5892c2ac2478c4a68873b60 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:06:13 +0000 Subject: [PATCH 29/59] updates deployment configuration makes deployment dependent on build --- .github/workflows/static.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 0a1bc06..3c13b00 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -1,5 +1,5 @@ # Simple workflow for deploying static content to GitHub Pages -name: Deploy static content to Pages +name: Deploy Documentation on Pages on: # Runs on pushes targeting the default branch @@ -44,6 +44,7 @@ jobs: make html deploy: + needs: build environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} @@ -57,7 +58,7 @@ jobs: uses: actions/upload-pages-artifact@v1 with: # Upload entire repository - path: '.' + path: './docs/_build/html' - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v1 From 80db98881617951eacc63d738994d4c35f2286a3 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:11:38 +0000 Subject: [PATCH 30/59] Update static.yml --- .github/workflows/static.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 3c13b00..6ce0b74 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -40,8 +40,10 @@ jobs: - name: Build documentation run: | cd docs + sphinx-apidoc -o . .. 
make html + ls deploy: needs: build From 589e85944a02ffdf6e65abc6fd62f1a6c645347f Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:17:49 +0000 Subject: [PATCH 31/59] Update static.yml test new configuration --- .github/workflows/static.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 6ce0b74..fc383f9 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -43,7 +43,9 @@ jobs: sphinx-apidoc -o . .. make html - ls + with: + name: documentation + path: ./docs deploy: needs: build @@ -60,7 +62,7 @@ jobs: uses: actions/upload-pages-artifact@v1 with: # Upload entire repository - path: './docs/_build/html' + path: './docs' - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v1 From 7fe3f0320fd8ee68e0abbc4b1ef187ae4bdbfa0a Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:20:50 +0000 Subject: [PATCH 32/59] Update static.yml checking the source directory --- .github/workflows/static.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index fc383f9..d4a2cb2 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -40,12 +40,8 @@ jobs: - name: Build documentation run: | cd docs - sphinx-apidoc -o . .. 
make html - with: - name: documentation - path: ./docs deploy: needs: build @@ -54,10 +50,17 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: + - name: Checkout folder + run: | + cd docs + ls + - name: Checkout uses: actions/checkout@v3 - name: Setup Pages uses: actions/configure-pages@v3 + + - name: Upload artifact uses: actions/upload-pages-artifact@v1 with: From e84bad4c7c4205836cf5686a40f1635790b1cae7 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:31:55 +0000 Subject: [PATCH 33/59] update CD to save artifacts between jobs update --- .github/workflows/static.yml | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index d4a2cb2..56ff672 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -42,6 +42,11 @@ jobs: cd docs sphinx-apidoc -o . .. make html + - name: Upload build data + uses: actions/upload-artifact@v3 + with: + name: documentation + path: ./docs/_build/html deploy: needs: build @@ -50,17 +55,15 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: - - name: Checkout folder - run: | - cd docs - ls - + - name: Checkout uses: actions/checkout@v3 - name: Setup Pages uses: actions/configure-pages@v3 - - + - name: Download built directory + uses: actions/download-artifact@v3 + with: + name: documentation - name: Upload artifact uses: actions/upload-pages-artifact@v1 with: @@ -69,3 +72,5 @@ jobs: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v1 + with: + folder: build From dded00c7536c53f60c85d46506e29741a6a8d97b Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Thu, 16 Mar 2023 18:38:29 +0000 Subject: [PATCH 34/59] update path for pages --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 56ff672..9f7ba2e 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -68,7 +68,7 @@ jobs: uses: actions/upload-pages-artifact@v1 with: # Upload entire repository - path: './docs' + path: '.' - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v1 From 04ab41b169c195f9279d9159789127c00e21f24e Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Fri, 17 Mar 2023 17:18:03 +0000 Subject: [PATCH 35/59] update documentation on utilities repo --- .github/workflows/static.yml | 5 +- docs/conf.py | 13 +- docs/style/custom.css | 3 + docs/tests.rst | 3 - docs/utilities.image_cutting.rst | 7 ++ docs/utilities.image_loading.rst | 7 ++ docs/utilities.rst | 31 ++--- ...s.segmentation_utils.ImagePreprocessor.rst | 7 ++ ...tilities.segmentation_utils.flowreader.rst | 7 ++ docs/utilities.segmentation_utils.rst | 27 ++-- index.rst | 20 --- .../segmentation_utils/ImagePreprocessor.py | 118 ++++++++---------- utilities/segmentation_utils/flowreader.py | 65 ++++------ 13 files changed, 137 insertions(+), 176 deletions(-) create mode 100644 docs/style/custom.css create mode 100644 docs/utilities.image_cutting.rst create mode 100644 docs/utilities.image_loading.rst create mode 100644 docs/utilities.segmentation_utils.ImagePreprocessor.rst create mode 100644 docs/utilities.segmentation_utils.flowreader.rst delete mode 100644 index.rst diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 9f7ba2e..aaa14ad 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -36,11 +36,12 @@ jobs: python -m pip install --upgrade pip if [ -f requirements.txt ]; then pip install -r requirements.txt; fi pip install -U sphinx - pip install insegel + pip install furo + - name: Build documentation run: | cd docs - sphinx-apidoc -o . .. + sphinx-apidoc -e -M -o . .. 
make html - name: Upload build data uses: actions/upload-artifact@v3 diff --git a/docs/conf.py b/docs/conf.py index 0f77733..ef96845 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - +add_module_names = False extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc'] templates_path = ['_templates'] @@ -30,8 +30,15 @@ + # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = 'insegel' -html_static_path = ['_static'] +#insegel theme +#furo theme + +html_theme = 'furo' + + +html_static_path = ['style'] +html_css_files = ['custom.css'] diff --git a/docs/style/custom.css b/docs/style/custom.css new file mode 100644 index 0000000..74826ff --- /dev/null +++ b/docs/style/custom.css @@ -0,0 +1,3 @@ +dl.py .field-list dt { + text-transform: none !important; +} \ No newline at end of file diff --git a/docs/tests.rst b/docs/tests.rst index cdc5cc2..d5113ea 100644 --- a/docs/tests.rst +++ b/docs/tests.rst @@ -1,9 +1,6 @@ tests package ============= -Module contents ---------------- - .. automodule:: tests :members: :undoc-members: diff --git a/docs/utilities.image_cutting.rst b/docs/utilities.image_cutting.rst new file mode 100644 index 0000000..db47b90 --- /dev/null +++ b/docs/utilities.image_cutting.rst @@ -0,0 +1,7 @@ +utilities.image\_cutting module +=============================== + +.. automodule:: utilities.image_cutting + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.image_loading.rst b/docs/utilities.image_loading.rst new file mode 100644 index 0000000..5707cfc --- /dev/null +++ b/docs/utilities.image_loading.rst @@ -0,0 +1,7 @@ +utilities.image\_loading module +=============================== + +.. 
automodule:: utilities.image_loading + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.rst b/docs/utilities.rst index 1230201..8f80976 100644 --- a/docs/utilities.rst +++ b/docs/utilities.rst @@ -1,6 +1,11 @@ utilities package ================= +.. automodule:: utilities + :members: + :undoc-members: + :show-inheritance: + Subpackages ----------- @@ -12,26 +17,8 @@ Subpackages Submodules ---------- -utilities.image\_cutting module -------------------------------- - -.. automodule:: utilities.image_cutting - :members: - :undoc-members: - :show-inheritance: - -utilities.image\_loading module -------------------------------- - -.. automodule:: utilities.image_loading - :members: - :undoc-members: - :show-inheritance: - -Module contents ---------------- +.. toctree:: + :maxdepth: 4 -.. automodule:: utilities - :members: - :undoc-members: - :show-inheritance: + utilities.image_cutting + utilities.image_loading diff --git a/docs/utilities.segmentation_utils.ImagePreprocessor.rst b/docs/utilities.segmentation_utils.ImagePreprocessor.rst new file mode 100644 index 0000000..ab26b9a --- /dev/null +++ b/docs/utilities.segmentation_utils.ImagePreprocessor.rst @@ -0,0 +1,7 @@ +utilities.segmentation\_utils.ImagePreprocessor module +====================================================== + +.. automodule:: utilities.segmentation_utils.ImagePreprocessor + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.segmentation_utils.flowreader.rst b/docs/utilities.segmentation_utils.flowreader.rst new file mode 100644 index 0000000..972d67e --- /dev/null +++ b/docs/utilities.segmentation_utils.flowreader.rst @@ -0,0 +1,7 @@ +utilities.segmentation\_utils.flowreader module +=============================================== + +.. 
automodule:: utilities.segmentation_utils.flowreader + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/utilities.segmentation_utils.rst b/docs/utilities.segmentation_utils.rst index 41251d0..7fe2dc9 100644 --- a/docs/utilities.segmentation_utils.rst +++ b/docs/utilities.segmentation_utils.rst @@ -1,29 +1,16 @@ utilities.segmentation\_utils package ===================================== -Submodules ----------- - -utilities.segmentation\_utils.ImagePreprocessor module ------------------------------------------------------- - -.. automodule:: utilities.segmentation_utils.ImagePreprocessor +.. automodule:: utilities.segmentation_utils :members: :undoc-members: :show-inheritance: -utilities.segmentation\_utils.flowreader module ------------------------------------------------ - -.. automodule:: utilities.segmentation_utils.flowreader - :members: - :undoc-members: - :show-inheritance: +Submodules +---------- -Module contents ---------------- +.. toctree:: + :maxdepth: 4 -.. automodule:: utilities.segmentation_utils - :members: - :undoc-members: - :show-inheritance: + utilities.segmentation_utils.ImagePreprocessor + utilities.segmentation_utils.flowreader diff --git a/index.rst b/index.rst deleted file mode 100644 index a5ee92f..0000000 --- a/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. GU Orbit Software Utilities documentation master file, created by - sphinx-quickstart on Thu Mar 16 16:55:14 2023. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to GU Orbit Software Utilities's documentation! -======================================================= - -.. 
toctree:: - :maxdepth: 2 - :caption: Contents: - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 31fea0c..fb71056 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -9,13 +9,10 @@ class PreprocessingQueue: """ object to initialize a preprocessing queue - Parameters: + Parameters ---------- - queue (list): list of functions to be applied - - Returns: - ------- - None + :queue list: list of functions to be applied + :arguments list[dict]: list of arguments to be passed to the functions """ queue: list[Callable] @@ -25,13 +22,9 @@ def update_seed(self, seed): """ Changes the seed of the queue - Parameters: + Parameters ---------- - seed (int): seed to be changed to - - Returns: - ------- - None + :seed int: seed to be changed to """ for i in self.arguments: i["seed"] = seed @@ -40,13 +33,10 @@ def get_queue_length(self): """ Returns the length of the queue - Parameters: - ---------- - None - - Returns: + Returns ------- - int: length of the queue + :return: length of the queue + :rtype: int """ return len(self.queue) @@ -55,17 +45,13 @@ def generate_default_queue(seed=0): """ Generates the default processing queue - Parameters: - ---------- - None - - Keyword Arguments: + Keyword Arguments ----------------- - seed (int): seed to be used for the random functions + :seed int: seed to be used for the random functions - Returns: + Returns ------- - PreprocessingQueue: default queue + :return PreprocessingQueue: default queue """ image_queue = PreprocessingQueue( queue=[ @@ -100,17 +86,17 @@ def generate_default_queue(seed=0): def onehot_encode(masks, output_size, num_classes): """ - Onehot encodes the images coming from the image generator object - Parameters: - ---------- - masks (tf tensor): masks to be onehot encoded - 
output_size (tuple): size of the output image, it is specified as (height, width) #!Note that for a column vector the width is 1 - num_classes (int): number of classes in the mask, to be onehot encoded + Function that one-hot encodes masks - Returns: + :batch(tf.Tensor) masks: Masks to be encoded + :tuple(int, int) output_size: Output size of the masks + :int num_classes: Number of classes in the masks + + Returns ------- - encoded (tf tensor): onehot encoded masks - """ + :return: Encoded masks + :rtype: batch(tf.Tensor) + """ encoded = np.zeros( (masks.shape[0], output_size[0] * output_size[1] , num_classes) ) @@ -130,32 +116,33 @@ def augmentation_pipeline( seed=0, ): """ - Applies augmentation pipeline to the image and mask - If no queue is passed a default processing queue is created + Function that can execute a set of predifined augmentation functions + stored in a PreprocessingQueue object. It augments both the image and the mask + with the same functions and arguments. - Parameters: + Parameters ---------- - image (tf tensor): image to be augmented - mask (tf tensor): mask to be augmented - input_size (tuple): size of the input image - output_size (tuple): size of the output image - - Keyword Arguments: + :tf.Tensor image: The image to be processed + :tf.Tensor mask: The mask to be processed + :tuple(int, int) input_size: Input size of the image + :tuple(int, int) output_size: Output size of the image + + Keyword Arguments ----------------- - image_queue (PreprocessingQueue): queue of image processing functions - mask_queue (PreprocessingQueue): queue of mask processing functions - channels (int): number of channels in the image - seed (int): seed to be used for the random functions + :PreprocessingQueue, optional mask_queue image_queue: Augmentation processing queue for images, defaults to None + :PreprocessingQueue, optional mask_queue: Augmentation processing queue for masks, defaults to None + :int, optional channels: Number of bands in the image, 
defaults to 3 + :int, optional seed: The seed to be used in the pipeline, defaults to 0 - Raises: + Raises ------ - ValueError: if only one queue is passed - - Returns: + :raises ValueError: If only one of the queues is passed + + Returns ------- - image (tf tensor): augmented image - mask (tf tensor): augmented mask - """ + :return: tuple of the processed image and mask + :rtype: tuple(tf.Tensor, tf.Tensor) + """ # reshapes masks, such that transforamtions work properly if output_size[1] == 1: @@ -184,20 +171,21 @@ def augmentation_pipeline( def flatten(image, input_size, channels=1): - """ - Flattens an input image, with reserving the channels + """flatten + Function that flattens an image preserving the number of channels - Parameters: + Parameters ---------- - image (tf tensor): image to be flattened - input_size (tuple): size of the input image + :tf.Tensor image: image to be flattened + :tuple(int, int) input_size: input size of the image - Keyword Arguments: + Keyword Arguments ----------------- - channels (int): number of channels in the image - - Returns: + :int, optional channels: number of chanels to preserve, defaults to 1 + + Returns ------- - image (tf tensor): flattened image + :return: flattened image + :rtype: tf.Tensor """ return tf.reshape(image, (input_size[0] * input_size[1], channels)) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index c5b2511..2bd8301 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -33,30 +33,26 @@ def __init__( Additionally, the reader can apply augmentation on the images, and one-hot encode them on the fly. 
- Parameters: + Parameters ---------- - image (string): path to the image directory - mask (string): path to the mask directory - batch_size (int): batch size - image_size (tuple): image size - output_size (tuple): output size + :string image: path to the image directory + :string mask: path to the mask directory + :int batch_size: batch size + :tuple image_size: image size + :tuple output_size: output size #! Note: in case the output is a column vector it has to be in the shape (x, 1) - num_classes (int): number of classes + :int num_classes: number of classes - Keyword Arguments: + Keyword Arguments ----------------- - shuffle (bool): whether to shuffle the dataset or not - batch_size (int): batch size - seed (int): seed for flow from directory + :bool shuffle: whether to shuffle the dataset or not + :int batch_size: batch size + :int seed: seed for flow from directory - Raises: + Raises ------ ValueError: if the output size is not a tuple of length 2 ValueError: if the output size is not a square matrix or a column vector - - Returns: - ------- - None """ if len(output_size) != 2: @@ -81,13 +77,10 @@ def get_dataset_size(self): """ Returns the length of the dataset - Parameters: - ---------- - None - - Returns: + Returns ------- - int: length of the dataset + :returns: length of the dataset + :rtype: int """ @@ -96,15 +89,6 @@ def get_dataset_size(self): def __make_generator(self): """ Creates the generator - - Parameters: - ---------- - None - - Returns: - ------- - None - """ image_datagen = ImageDataGenerator() @@ -134,13 +118,10 @@ def get_generator(self): """ Returns the generator object - Parameters: - ---------- - None - - Returns: + Returns ------- - generator: generator object + :return: generator object + :rtype: generator """ return self.train_generator @@ -150,13 +131,15 @@ def preprocess(self, generator_zip, state=None): Preprocessor function encapsulates both the image, and mask generator objects. 
Augments the images and masks and onehot encodes the masks - Parameters: + Parameters ---------- - generator_zip (tuple): tuple of image and mask generator + :tuple generator_zip: tuple of image and mask generator + :int, optional state: random state for reproducibility, defaults to None - Returns: + Returns ------- - a batch (tuple): generator batch of image and mask + :return: generator batch of image and mask + :rtype: batch(tuple) """ for (img, mask) in generator_zip: for i in range(len(img)): From dbd4eb0282dd1dcbe9982fad11b37682eaa00695 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Fri, 17 Mar 2023 17:21:53 +0000 Subject: [PATCH 36/59] remove pregenerated modules --- docs/modules.rst | 8 ------- docs/tests.rst | 7 ------ docs/utilities.image_cutting.rst | 7 ------ docs/utilities.image_loading.rst | 7 ------ docs/utilities.rst | 24 ------------------- ...s.segmentation_utils.ImagePreprocessor.rst | 7 ------ ...tilities.segmentation_utils.flowreader.rst | 7 ------ docs/utilities.segmentation_utils.rst | 16 ------------- 8 files changed, 83 deletions(-) delete mode 100644 docs/modules.rst delete mode 100644 docs/tests.rst delete mode 100644 docs/utilities.image_cutting.rst delete mode 100644 docs/utilities.image_loading.rst delete mode 100644 docs/utilities.rst delete mode 100644 docs/utilities.segmentation_utils.ImagePreprocessor.rst delete mode 100644 docs/utilities.segmentation_utils.flowreader.rst delete mode 100644 docs/utilities.segmentation_utils.rst diff --git a/docs/modules.rst b/docs/modules.rst deleted file mode 100644 index d62cd7d..0000000 --- a/docs/modules.rst +++ /dev/null @@ -1,8 +0,0 @@ -utilities -========= - -.. toctree:: - :maxdepth: 4 - - tests - utilities diff --git a/docs/tests.rst b/docs/tests.rst deleted file mode 100644 index d5113ea..0000000 --- a/docs/tests.rst +++ /dev/null @@ -1,7 +0,0 @@ -tests package -============= - -.. 
automodule:: tests - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/utilities.image_cutting.rst b/docs/utilities.image_cutting.rst deleted file mode 100644 index db47b90..0000000 --- a/docs/utilities.image_cutting.rst +++ /dev/null @@ -1,7 +0,0 @@ -utilities.image\_cutting module -=============================== - -.. automodule:: utilities.image_cutting - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/utilities.image_loading.rst b/docs/utilities.image_loading.rst deleted file mode 100644 index 5707cfc..0000000 --- a/docs/utilities.image_loading.rst +++ /dev/null @@ -1,7 +0,0 @@ -utilities.image\_loading module -=============================== - -.. automodule:: utilities.image_loading - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/utilities.rst b/docs/utilities.rst deleted file mode 100644 index 8f80976..0000000 --- a/docs/utilities.rst +++ /dev/null @@ -1,24 +0,0 @@ -utilities package -================= - -.. automodule:: utilities - :members: - :undoc-members: - :show-inheritance: - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - utilities.segmentation_utils - -Submodules ----------- - -.. toctree:: - :maxdepth: 4 - - utilities.image_cutting - utilities.image_loading diff --git a/docs/utilities.segmentation_utils.ImagePreprocessor.rst b/docs/utilities.segmentation_utils.ImagePreprocessor.rst deleted file mode 100644 index ab26b9a..0000000 --- a/docs/utilities.segmentation_utils.ImagePreprocessor.rst +++ /dev/null @@ -1,7 +0,0 @@ -utilities.segmentation\_utils.ImagePreprocessor module -====================================================== - -.. 
automodule:: utilities.segmentation_utils.ImagePreprocessor - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/utilities.segmentation_utils.flowreader.rst b/docs/utilities.segmentation_utils.flowreader.rst deleted file mode 100644 index 972d67e..0000000 --- a/docs/utilities.segmentation_utils.flowreader.rst +++ /dev/null @@ -1,7 +0,0 @@ -utilities.segmentation\_utils.flowreader module -=============================================== - -.. automodule:: utilities.segmentation_utils.flowreader - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/utilities.segmentation_utils.rst b/docs/utilities.segmentation_utils.rst deleted file mode 100644 index 7fe2dc9..0000000 --- a/docs/utilities.segmentation_utils.rst +++ /dev/null @@ -1,16 +0,0 @@ -utilities.segmentation\_utils package -===================================== - -.. automodule:: utilities.segmentation_utils - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -.. toctree:: - :maxdepth: 4 - - utilities.segmentation_utils.ImagePreprocessor - utilities.segmentation_utils.flowreader From 01a71782ed7750821be24637d01d2cc3d86e887a Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Fri, 17 Mar 2023 17:25:27 +0000 Subject: [PATCH 37/59] updates CD pipeline --- .github/workflows/static.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index aaa14ad..4288d3b 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -41,7 +41,9 @@ jobs: - name: Build documentation run: | cd docs - sphinx-apidoc -e -M -o . .. + + + sphinx-apidoc -e -M --force -o . .. 
make html - name: Upload build data uses: actions/upload-artifact@v3 From 9ebf9b3db6f064511641f2953410b677a8bfd310 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 11:18:33 +0000 Subject: [PATCH 38/59] add extension to sphinx to allow parsion of multiple documentation types --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index ef96845..405bc4d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,7 +23,7 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration add_module_names = False -extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc'] +extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc','sphinx.ext.napoleon'] templates_path = ['_templates'] exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] From 44b561968faa282935f073e3c8a267f26af3e2c3 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 11:37:07 +0000 Subject: [PATCH 39/59] removes unnecessary import --- utilities/image_cutting.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utilities/image_cutting.py b/utilities/image_cutting.py index 99a1b0f..e84d6df 100644 --- a/utilities/image_cutting.py +++ b/utilities/image_cutting.py @@ -2,7 +2,6 @@ from numpy.typing import NDArray import os import rasterio -from matplotlib import pyplot as plt import pathlib from PIL import Image from typing import Any From ebb5fc845f8f885aca6dc237f9a9e539c1ddf720 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 11:37:26 +0000 Subject: [PATCH 40/59] adds setup.py for build specification --- setup.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 setup.py diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..cd279af --- /dev/null +++ b/setup.py @@ -0,0 +1,38 @@ +from setuptools import find_packages, 
setup + +with open("README.md", "r") as f: + long_description = f.read() + + +setup( + name="guorbit_utils", + version="0.1.0", + author="GU Orbit Software Team", + author_email="", + description="A package containing utilities for GU Orbit Software", + long_description=long_description, + long_description_content_type="text/markdown", + url="", + packages=find_packages(where="utilities"), + package_dir={"": "utilities"}, + license="MIT", + classifiers=[ + "Programming Language :: Python :: 3.10", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires=">=3.10", + install_requires=[ + "numpy >= 1.24.0", + "rasterio >= 1.3.6", + "Pillow >= 9.4.0", + "tensorflow >= 2.10", + ], + extras_require={ + "dev": [ + "pytest >= 7.2.2", + "pytest-cov >= 4.0.0", + "twine >= 4.0.0", + ] + }, +) \ No newline at end of file From 51a701590d2d3433ef961e3e1aa9fa05789beaa6 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 13:14:42 +0000 Subject: [PATCH 41/59] fixes bug in flow reader and agumentation pipeline that causes masks not to be reshaped during augmentation, resulting in crash --- utilities/segmentation_utils/ImagePreprocessor.py | 10 +++++++--- utilities/segmentation_utils/flowreader.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index fb71056..e35d365 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -110,6 +110,7 @@ def augmentation_pipeline( mask, input_size, output_size, + output_reshape, image_queue: PreprocessingQueue = None, mask_queue: PreprocessingQueue = None, channels=3, @@ -126,6 +127,7 @@ def augmentation_pipeline( :tf.Tensor mask: The mask to be processed :tuple(int, int) input_size: Input size of the image :tuple(int, int) output_size: Output size of the image + :tuple(int, int) 
output_reshape: In case the image is a column vector, this is the shape it should be reshaped to Keyword Arguments ----------------- @@ -146,18 +148,19 @@ def augmentation_pipeline( # reshapes masks, such that transforamtions work properly if output_size[1] == 1: - mask = tf.reshape(mask, output_size) + mask = tf.reshape(mask, (output_reshape[0], output_reshape[1], 1)) if image_queue == None and mask_queue == None: #!Possibly in the wrong place as it has to be regenerated every time image_queue, mask_queue = generate_default_queue() - print("No queue passed, using default queue") + # print("No queue passed, using default queue") elif image_queue == None or mask_queue == None: raise ValueError("Both queues must be passed or none") image_queue.update_seed(seed) mask_queue.update_seed(seed) + for i, fun in enumerate(image_queue.queue): image = fun(image, **image_queue.arguments[i]) @@ -188,4 +191,5 @@ def flatten(image, input_size, channels=1): :return: flattened image :rtype: tf.Tensor """ - return tf.reshape(image, (input_size[0] * input_size[1], channels)) + #the 1 is required to preserve the shape similar to the original + return tf.reshape(image, (input_size[0] * input_size[1],1, channels)) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 2bd8301..6db019c 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -3,6 +3,7 @@ """ import os +import math import numpy as np from keras.preprocessing.image import ImageDataGenerator from utilities.segmentation_utils import ImagePreprocessor @@ -41,6 +42,7 @@ def __init__( :tuple image_size: image size :tuple output_size: output size #! 
Note: in case the output is a column vector it has to be in the shape (x, 1) + :int num_classes: number of classes Keyword Arguments @@ -61,6 +63,7 @@ def __init__( raise ValueError( "The output size has to be a square matrix or a column vector" ) + self.image_path = image_path self.mask_path = mask_path @@ -94,6 +97,12 @@ def __make_generator(self): image_datagen = ImageDataGenerator() mask_datagen = ImageDataGenerator() + if self.output_size[1] == 1: + # only enters if the output is a column vector + # such no need to define it otherwise + dimension = math.sqrt(self.output_size[0]) + self.output_reshape = (int(dimension), int(dimension)) + image_generator = image_datagen.flow_from_directory( self.image_path, class_mode=None, @@ -151,7 +160,7 @@ def preprocess(self, generator_zip, state=None): image_seed = state.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size, self.output_size, seed=image_seed + img[i], mask[i], self.image_size, self.output_size,self.output_reshape, seed=0 ) mask = ImagePreprocessor.onehot_encode( mask, self.output_size, self.num_classes From 9c06dbe48e8d40a879aaf8cc15eba8749096b7e3 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 13:27:48 +0000 Subject: [PATCH 42/59] remove napoleon from sphinx --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 405bc4d..ef96845 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,7 +23,7 @@ # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration add_module_names = False -extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc','sphinx.ext.napoleon'] +extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc'] templates_path = ['_templates'] exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] From 
cc94418c6693ee01b23115563ddb6b819c107156 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 17:15:38 +0000 Subject: [PATCH 43/59] updates tests to mainly rely on tensors, and to adapt to the changes in the imagepreprocessor --- .../image_preprocessor_test.py | 54 ++++++++++++++----- .../segmentation_utils/ImagePreprocessor.py | 37 +++++++------ 2 files changed, 58 insertions(+), 33 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 3436d7a..006684d 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -1,5 +1,6 @@ from utilities.segmentation_utils import ImagePreprocessor import numpy as np +import tensorflow as tf def test_image_onehot_encoder() -> None: @@ -8,17 +9,15 @@ def test_image_onehot_encoder() -> None: n_classes = 2 batch_size = 1 image_size = (512, 512) - output_size = (256,256) - + output_size = (256, 256) # creating a mask with 2 classes mask = np.zeros((batch_size, output_size[0] * output_size[1])) mask[:, ::2] = 1 + # creating a onehot mask to compare with the output of the function - onehot_test = np.zeros( - (batch_size, output_size[0] * output_size[1], n_classes) - ) + onehot_test = np.zeros((batch_size, output_size[0] * output_size[1], n_classes)) onehot_test[:, ::2, 1] = 1 onehot_test[:, 1::2, 0] = 1 @@ -36,8 +35,12 @@ def test_image_augmentation_pipeline_column() -> None: # predifining input variables image = np.zeros((512, 512, 3)) mask = np.zeros((256 * 256, 1)) + image = tf.convert_to_tensor(image) + mask = tf.convert_to_tensor(mask) + input_size = (512, 512) output_size = (256 * 256, 1) + output_reshape = (256, 256) seed = 0 # createing dummy queues @@ -49,36 +52,48 @@ def test_image_augmentation_pipeline_column() -> None: ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, output_size, 
image_queue, mask_queue + image, mask, input_size, output_size, output_reshape, image_queue, mask_queue ) + image_new = image_new.numpy() + mask_new = mask_new.numpy() - assert image_new.shape == (512, 512, 3) - assert mask_new.shape == (256 * 256, 1) + assert np.array(image_new).shape == (512, 512, 3) + assert np.array(mask_new).shape == (256 * 256, 1, 1) def test_image_augmentation_pipeline_no_queue() -> None: # predifining input variables image = np.zeros((512, 512, 3)) mask = np.zeros((256, 256, 1)) + image = tf.convert_to_tensor(image) + mask = tf.convert_to_tensor(mask) + input_size = (512, 512) output_size = (256, 256) + output_reshape = (256, 256) seed = 0 image_new, mask_new = ImagePreprocessor.augmentation_pipeline( image, mask, input_size, output_size ) + image_new = image_new.numpy() + mask_new = mask_new.numpy() assert image_new.shape == (512, 512, 3) assert mask_new.shape == (256, 256, 1) -def test_image_augmentation_pipeline_error_raised()-> None: +def test_image_augmentation_pipeline_error_raised() -> None: try: # predifining input variables image = np.zeros((512, 512, 3)) mask = np.zeros((256, 256, 1)) + image = tf.convert_to_tensor(image) + mask = tf.convert_to_tensor(mask) + input_size = (512, 512) output_size = (256, 256) + output_reshape = (256, 256) seed = 0 # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( @@ -86,7 +101,7 @@ def test_image_augmentation_pipeline_error_raised()-> None: ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, output_size, image_queue + image, mask, input_size, output_size, image_queue=image_queue ) assert False except ValueError: @@ -97,6 +112,9 @@ def test_image_augmentation_pipeline_squarematrix() -> None: # predifining input variables image = np.zeros((512, 512, 3)) mask = np.zeros((256, 256, 1)) + image = tf.convert_to_tensor(image) + mask = tf.convert_to_tensor(mask) + input_size = (512, 512) output_size = (256, 256) seed = 0 @@ -110,8 +128,15 @@ 
def test_image_augmentation_pipeline_squarematrix() -> None: ) image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, output_size, image_queue, mask_queue + image, + mask, + input_size, + output_size, + image_queue=image_queue, + mask_queue=mask_queue, ) + image_new = image_new.numpy() + mask_new = mask_new.numpy() assert image_new.shape == (512, 512, 3) assert mask_new.shape == (256, 256, 1) @@ -149,9 +174,10 @@ def test_generate_default_queue() -> None: def test_flatten() -> None: image = np.zeros((512, 512, 3)) + image = tf.convert_to_tensor(image) image = ImagePreprocessor.flatten(image, (512, 512), 3) - assert image.shape == (512 * 512, 3) - + image = image.numpy() + assert image.shape == (512 * 512, 1, 3) -#TODO: add tests for checking if errors are raised when the input is not correct \ No newline at end of file +# TODO: add tests for checking if errors are raised when the input is not correct diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index e35d365..d8f68e4 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -91,15 +91,13 @@ def onehot_encode(masks, output_size, num_classes): :batch(tf.Tensor) masks: Masks to be encoded :tuple(int, int) output_size: Output size of the masks :int num_classes: Number of classes in the masks - + Returns ------- :return: Encoded masks :rtype: batch(tf.Tensor) - """ - encoded = np.zeros( - (masks.shape[0], output_size[0] * output_size[1] , num_classes) - ) + """ + encoded = np.zeros((masks.shape[0], output_size[0] * output_size[1], num_classes)) for i in range(num_classes): encoded[:, :, i] = tf.squeeze((masks == i).astype(int)) return encoded @@ -108,13 +106,13 @@ def onehot_encode(masks, output_size, num_classes): def augmentation_pipeline( image, mask, - input_size, - output_size, - output_reshape, + input_size: tuple[int, int], + output_size: 
tuple[int, int], + output_reshape: tuple[int, int] = None, image_queue: PreprocessingQueue = None, mask_queue: PreprocessingQueue = None, - channels=3, - seed=0, + channels: int = 3, + seed: int = 0, ): """ Function that can execute a set of predifined augmentation functions @@ -127,10 +125,11 @@ def augmentation_pipeline( :tf.Tensor mask: The mask to be processed :tuple(int, int) input_size: Input size of the image :tuple(int, int) output_size: Output size of the image - :tuple(int, int) output_reshape: In case the image is a column vector, this is the shape it should be reshaped to - + + Keyword Arguments ----------------- + :tuple(int, int), optional output_reshape: In case the image is a column vector, this is the shape it should be reshaped to. Defaults to None. :PreprocessingQueue, optional mask_queue image_queue: Augmentation processing queue for images, defaults to None :PreprocessingQueue, optional mask_queue: Augmentation processing queue for masks, defaults to None :int, optional channels: Number of bands in the image, defaults to 3 @@ -139,12 +138,12 @@ def augmentation_pipeline( Raises ------ :raises ValueError: If only one of the queues is passed - + Returns ------- :return: tuple of the processed image and mask :rtype: tuple(tf.Tensor, tf.Tensor) - """ + """ # reshapes masks, such that transforamtions work properly if output_size[1] == 1: @@ -154,10 +153,10 @@ def augmentation_pipeline( #!Possibly in the wrong place as it has to be regenerated every time image_queue, mask_queue = generate_default_queue() # print("No queue passed, using default queue") - + elif image_queue == None or mask_queue == None: raise ValueError("Both queues must be passed or none") - + image_queue.update_seed(seed) mask_queue.update_seed(seed) @@ -185,11 +184,11 @@ def flatten(image, input_size, channels=1): Keyword Arguments ----------------- :int, optional channels: number of chanels to preserve, defaults to 1 - + Returns ------- :return: flattened image :rtype: tf.Tensor 
""" - #the 1 is required to preserve the shape similar to the original - return tf.reshape(image, (input_size[0] * input_size[1],1, channels)) + # the 1 is required to preserve the shape similar to the original + return tf.reshape(image, (input_size[0] * input_size[1], 1, channels)) From 3f6ffccd00c05d95b355d361753c54fde13ab638 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 17:21:43 +0000 Subject: [PATCH 44/59] moves documentation branch to staging --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 4288d3b..daa9701 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -4,7 +4,7 @@ name: Deploy Documentation on Pages on: # Runs on pushes targeting the default branch push: - branches: ["sphinx_documentation"] + branches: ["staging"] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 40aaf0c0f27d7d69b2940438f72bcadd727a716f Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 20:03:19 +0000 Subject: [PATCH 45/59] updates flowreader to generate the default queue instead of augmentation pipeline, adjust tests accordingly --- .../flow_reader_test.py | 61 +++++++++++++++++-- .../image_preprocessor_test.py | 49 --------------- .../segmentation_utils/ImagePreprocessor.py | 6 -- utilities/segmentation_utils/flowreader.py | 44 +++++++++---- 4 files changed, 87 insertions(+), 73 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index a031da8..04b4695 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -1,9 +1,8 @@ import os - import numpy as np +import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from pytest import MonkeyPatch - from utilities.segmentation_utils import 
ImagePreprocessor from utilities.segmentation_utils.flowreader import FlowGenerator @@ -41,13 +40,37 @@ def test_makes_flow_generator() -> None: "flow_from_directory", flow_from_directory_mock, ) - patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) - + patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) # create a flow generator flow_generator = FlowGenerator(**generator_args) pass +def test_makes_flow_generator_with_queue() -> None: + patch = MonkeyPatch() + # mock an imagedatagenerator from keras + mock_image_datagen = patch.setattr( + ImageDataGenerator, + "flow_from_directory", + flow_from_directory_mock, + ) + patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) + + # create dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue([lambda x, y, seed: x], [{"y": 1}]) + mask_queue = ImagePreprocessor.PreprocessingQueue([lambda x, y, seed: x], [{"y": 1}]) + + # create a copy of the generator args + new_generator_args = generator_args.copy() + new_generator_args["preprocessing_queue_image"] = image_queue + new_generator_args["preprocessing_queue_mask"] = mask_queue + + # create a flow generator + flow_generator = FlowGenerator(**new_generator_args) + + + + def test_makes_flow_generator_wrong_shape() -> None: try: patch = MonkeyPatch() @@ -57,7 +80,7 @@ def test_makes_flow_generator_wrong_shape() -> None: "flow_from_directory", flow_from_directory_mock, ) - patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) + patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) fail_generator = generator_args.copy() # create a flow generator @@ -77,7 +100,7 @@ def test_makes_flow_generator_wrong_dimension() -> None: "flow_from_directory", flow_from_directory_mock, ) - patch.setattr(FlowGenerator, "preprocess", lambda self, x: x) + patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) fail_generator = generator_args.copy() # create a flow generator 
@@ -154,3 +177,29 @@ def test_get_generator() -> None: assert generator != None patch.undo() patch.undo() + + +def test_reader_error_raised() -> None: + try: + # predifining input variables + image = np.zeros((512, 512, 3)) + mask = np.zeros((256, 256, 1)) + image = tf.convert_to_tensor(image) + mask = tf.convert_to_tensor(mask) + + input_size = (512, 512) + output_size = (256, 256) + output_reshape = (256, 256) + seed = 0 + # createing dummy queues + image_queue = ImagePreprocessor.PreprocessingQueue( + queue=[lambda x, y, seed: x], arguments=[{"y": 1}] + ) + + # create a flow generator + flow_generator = FlowGenerator( + preprocessing_queue_image=image_queue, **generator_args + ) + assert False + except ValueError: + assert True diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index 006684d..f6e1454 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -61,53 +61,6 @@ def test_image_augmentation_pipeline_column() -> None: assert np.array(mask_new).shape == (256 * 256, 1, 1) -def test_image_augmentation_pipeline_no_queue() -> None: - # predifining input variables - image = np.zeros((512, 512, 3)) - mask = np.zeros((256, 256, 1)) - image = tf.convert_to_tensor(image) - mask = tf.convert_to_tensor(mask) - - input_size = (512, 512) - output_size = (256, 256) - output_reshape = (256, 256) - seed = 0 - - image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, output_size - ) - image_new = image_new.numpy() - mask_new = mask_new.numpy() - - assert image_new.shape == (512, 512, 3) - assert mask_new.shape == (256, 256, 1) - - -def test_image_augmentation_pipeline_error_raised() -> None: - try: - # predifining input variables - image = np.zeros((512, 512, 3)) - mask = np.zeros((256, 256, 1)) - image = tf.convert_to_tensor(image) - mask = tf.convert_to_tensor(mask) 
- - input_size = (512, 512) - output_size = (256, 256) - output_reshape = (256, 256) - seed = 0 - # createing dummy queues - image_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda x, y, seed: x], arguments=[{"y": 1}] - ) - - image_new, mask_new = ImagePreprocessor.augmentation_pipeline( - image, mask, input_size, output_size, image_queue=image_queue - ) - assert False - except ValueError: - assert True - - def test_image_augmentation_pipeline_squarematrix() -> None: # predifining input variables image = np.zeros((512, 512, 3)) @@ -179,5 +132,3 @@ def test_flatten() -> None: image = image.numpy() assert image.shape == (512 * 512, 1, 3) - -# TODO: add tests for checking if errors are raised when the input is not correct diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index d8f68e4..739a144 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -149,13 +149,7 @@ def augmentation_pipeline( if output_size[1] == 1: mask = tf.reshape(mask, (output_reshape[0], output_reshape[1], 1)) - if image_queue == None and mask_queue == None: - #!Possibly in the wrong place as it has to be regenerated every time - image_queue, mask_queue = generate_default_queue() - # print("No queue passed, using default queue") - elif image_queue == None or mask_queue == None: - raise ValueError("Both queues must be passed or none") image_queue.update_seed(seed) mask_queue.update_seed(seed) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 6db019c..bd0e320 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -19,14 +19,17 @@ class FlowGenerator: def __init__( self, - image_path, - mask_path, - image_size, - output_size, - num_classes, - shuffle=True, - batch_size=2, - seed=909, + image_path: str, + mask_path: str, + image_size: tuple[int, int], 
+ output_size: tuple[int, int], + num_classes:int, + shuffle:bool=True, + batch_size:int=2, + seed:int=909, + preprocessing_seed:int=None, + preprocessing_queue_image:ImagePreprocessor.PreprocessingQueue = None, + preprocessing_queue_mask:ImagePreprocessor.PreprocessingQueue = None, ): """ Initializes the flow generator object, @@ -50,6 +53,9 @@ def __init__( :bool shuffle: whether to shuffle the dataset or not :int batch_size: batch size :int seed: seed for flow from directory + :int preprocessing_seed: seed for preprocessing, defaults to None + :PreprocessingQueue preprocessing_queue_image: preprocessing queue for images + :PreprocessingQueue preprocessing_queue_mask: preprocessing queue for masks Raises ------ @@ -63,7 +69,6 @@ def __init__( raise ValueError( "The output size has to be a square matrix or a column vector" ) - self.image_path = image_path self.mask_path = mask_path @@ -73,6 +78,9 @@ def __init__( self.num_classes = num_classes self.shuffle = shuffle self.seed = seed + self.preprocessing_queue_image = preprocessing_queue_image + self.preprocessing_queue_mask = preprocessing_queue_mask + self.preprocessing_seed = preprocessing_seed self.__make_generator() print("Reading images from: ", self.image_path) @@ -119,9 +127,14 @@ def __make_generator(self): target_size=self.output_size, color_mode="grayscale", ) + if self.preprocessing_queue_image == None and self.preprocessing_queue_mask == None: + #!Possibly in the wrong place as it has to be regenerated every time + self.preprocessing_queue_image, self.preprocessing_queue_mask = ImagePreprocessor.generate_default_queue() + elif self.preprocessing_queue_image == None or self.preprocessing_queue_mask == None: + raise ValueError("Both queues must be passed or none") self.train_generator = zip(image_generator, mask_generator) - self.train_generator = self.preprocess(self.train_generator) + self.train_generator = self.preprocess(self.train_generator, self.preprocessing_queue_image, 
self.preprocessing_queue_mask, self.preprocessing_seed) def get_generator(self): """ @@ -135,7 +148,7 @@ def get_generator(self): """ return self.train_generator - def preprocess(self, generator_zip, state=None): + def preprocess(self, generator_zip, preprocessing_queue_image, preprocessing_queue_mask,state=None): """ Preprocessor function encapsulates both the image, and mask generator objects. Augments the images and masks and onehot encodes the masks @@ -160,7 +173,14 @@ def preprocess(self, generator_zip, state=None): image_seed = state.randint(0, 100000) img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], mask[i], self.image_size, self.output_size,self.output_reshape, seed=0 + img[i], + mask[i], + self.image_size, + self.output_size, + self.output_reshape, + seed=0, + # preprocessing_queue_image=preprocessing_queue_image, + # preprocessing_queue_mask=preprocessing_queue_mask, ) mask = ImagePreprocessor.onehot_encode( mask, self.output_size, self.num_classes From 968be45bfe9517c29aa1ed6b6cce19d2b632c90f Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 20:22:31 +0000 Subject: [PATCH 46/59] fixes bug in flowreader where some important variable assignments were commented out --- utilities/segmentation_utils/flowreader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index bd0e320..1bcb039 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -179,8 +179,8 @@ def preprocess(self, generator_zip, preprocessing_queue_image, preprocessing_que self.output_size, self.output_reshape, seed=0, - # preprocessing_queue_image=preprocessing_queue_image, - # preprocessing_queue_mask=preprocessing_queue_mask, + image_queue=preprocessing_queue_image, + mask_queue=preprocessing_queue_mask, ) mask = ImagePreprocessor.onehot_encode( mask, self.output_size, self.num_classes From 
f47bb2595d5a42b5606ec08ad58dfc05a2b8590b Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 20:25:39 +0000 Subject: [PATCH 47/59] point documentation to master temporarily --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index daa9701..21341d9 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -4,7 +4,7 @@ name: Deploy Documentation on Pages on: # Runs on pushes targeting the default branch push: - branches: ["staging"] + branches: ["main"] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 349ab639f6d30ccfd042572fe70da409c70a83e0 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 20:28:00 +0000 Subject: [PATCH 48/59] adds seed assignment in preprocess --- utilities/segmentation_utils/flowreader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 1bcb039..7c75a2a 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -178,7 +178,7 @@ def preprocess(self, generator_zip, preprocessing_queue_image, preprocessing_que self.image_size, self.output_size, self.output_reshape, - seed=0, + seed=image_seed, image_queue=preprocessing_queue_image, mask_queue=preprocessing_queue_mask, ) From abdd64828259cd320bf255975fa86dbb47f2ca85 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 20:28:35 +0000 Subject: [PATCH 49/59] adds karg assignment for preprocessor call --- utilities/segmentation_utils/flowreader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 7c75a2a..2cce3cd 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ 
-134,7 +134,7 @@ def __make_generator(self): raise ValueError("Both queues must be passed or none") self.train_generator = zip(image_generator, mask_generator) - self.train_generator = self.preprocess(self.train_generator, self.preprocessing_queue_image, self.preprocessing_queue_mask, self.preprocessing_seed) + self.train_generator = self.preprocess(self.train_generator, self.preprocessing_queue_image, self.preprocessing_queue_mask, state = self.preprocessing_seed) def get_generator(self): """ From e67f9f1a3e4bc26e9c3d6144a28978873b84bca5 Mon Sep 17 00:00:00 2001 From: Andras Bodrogai Date: Sat, 18 Mar 2023 20:49:01 +0000 Subject: [PATCH 50/59] adds ability to disable the preprocessing pipeline --- utilities/segmentation_utils/flowreader.py | 45 ++++++++++++---------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index 2cce3cd..c0fef17 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -26,10 +26,12 @@ def __init__( num_classes:int, shuffle:bool=True, batch_size:int=2, + preprocessing_enabled:bool=True, seed:int=909, preprocessing_seed:int=None, preprocessing_queue_image:ImagePreprocessor.PreprocessingQueue = None, preprocessing_queue_mask:ImagePreprocessor.PreprocessingQueue = None, + ): """ Initializes the flow generator object, @@ -52,6 +54,7 @@ def __init__( ----------------- :bool shuffle: whether to shuffle the dataset or not :int batch_size: batch size + :bool preprocessing_enabled: whether to apply preprocessing or not :int seed: seed for flow from directory :int preprocessing_seed: seed for preprocessing, defaults to None :PreprocessingQueue preprocessing_queue_image: preprocessing queue for images @@ -78,6 +81,7 @@ def __init__( self.num_classes = num_classes self.shuffle = shuffle self.seed = seed + self.preprocessing_enabled = preprocessing_enabled self.preprocessing_queue_image = 
preprocessing_queue_image self.preprocessing_queue_mask = preprocessing_queue_mask self.preprocessing_seed = preprocessing_seed @@ -134,7 +138,7 @@ def __make_generator(self): raise ValueError("Both queues must be passed or none") self.train_generator = zip(image_generator, mask_generator) - self.train_generator = self.preprocess(self.train_generator, self.preprocessing_queue_image, self.preprocessing_queue_mask, state = self.preprocessing_seed) + self.train_generator = self.preprocess(self.train_generator) def get_generator(self): """ @@ -148,7 +152,7 @@ def get_generator(self): """ return self.train_generator - def preprocess(self, generator_zip, preprocessing_queue_image, preprocessing_queue_mask,state=None): + def preprocess(self, generator_zip): """ Preprocessor function encapsulates both the image, and mask generator objects. Augments the images and masks and onehot encodes the masks @@ -164,24 +168,25 @@ def preprocess(self, generator_zip, preprocessing_queue_image, preprocessing_que :rtype: batch(tuple) """ for (img, mask) in generator_zip: - for i in range(len(img)): - # random state for reproducibility - if state is None: - image_seed = np.random.randint(0, 100000) - else: - state = np.random.RandomState(state) - image_seed = state.randint(0, 100000) - - img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], - mask[i], - self.image_size, - self.output_size, - self.output_reshape, - seed=image_seed, - image_queue=preprocessing_queue_image, - mask_queue=preprocessing_queue_mask, - ) + if self.preprocessing_enabled: + for i in range(len(img)): + # random state for reproducibility + if self.preprocessing_seed is None: + image_seed = np.random.randint(0, 100000) + else: + state = np.random.RandomState(state) + image_seed = state.randint(0, 100000) + + img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( + img[i], + mask[i], + self.image_size, + self.output_size, + self.output_reshape, + seed=image_seed, + 
image_queue=self.preprocessing_queue_image, + mask_queue=self.preprocessing_queue_mask, + ) mask = ImagePreprocessor.onehot_encode( mask, self.output_size, self.num_classes ) From ccf7e944d94e8b2e57b08778b3ddb18b13afb44e Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 21:19:25 +0000 Subject: [PATCH 51/59] updates image preprocessor and flowreader to comply with linting --- .../segmentation_utils/ImagePreprocessor.py | 19 +++++++---- utilities/segmentation_utils/flowreader.py | 34 ++++++++++--------- 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/utilities/segmentation_utils/ImagePreprocessor.py b/utilities/segmentation_utils/ImagePreprocessor.py index 739a144..843843c 100644 --- a/utilities/segmentation_utils/ImagePreprocessor.py +++ b/utilities/segmentation_utils/ImagePreprocessor.py @@ -1,7 +1,8 @@ -import numpy as np -import tensorflow as tf from dataclasses import dataclass from typing import Callable, Dict +import numpy as np +import tensorflow as tf + @dataclass @@ -129,9 +130,15 @@ def augmentation_pipeline( Keyword Arguments ----------------- - :tuple(int, int), optional output_reshape: In case the image is a column vector, this is the shape it should be reshaped to. Defaults to None. - :PreprocessingQueue, optional mask_queue image_queue: Augmentation processing queue for images, defaults to None - :PreprocessingQueue, optional mask_queue: Augmentation processing queue for masks, defaults to None + :tuple(int, int), optional output_reshape: In case the image is a column vector, + this is the shape it should be reshaped to. Defaults to None. 
+ + :PreprocessingQueue, optional mask_queue image_queue: + Augmentation processing queue for images, defaults to None + + :PreprocessingQueue, optional mask_queue: Augmentation processing queue + for masks, defaults to None + :int, optional channels: Number of bands in the image, defaults to 3 :int, optional seed: The seed to be used in the pipeline, defaults to 0 @@ -149,8 +156,6 @@ def augmentation_pipeline( if output_size[1] == 1: mask = tf.reshape(mask, (output_reshape[0], output_reshape[1], 1)) - - image_queue.update_seed(seed) mask_queue.update_seed(seed) diff --git a/utilities/segmentation_utils/flowreader.py b/utilities/segmentation_utils/flowreader.py index c0fef17..f3e94d5 100644 --- a/utilities/segmentation_utils/flowreader.py +++ b/utilities/segmentation_utils/flowreader.py @@ -23,15 +23,14 @@ def __init__( mask_path: str, image_size: tuple[int, int], output_size: tuple[int, int], - num_classes:int, - shuffle:bool=True, - batch_size:int=2, - preprocessing_enabled:bool=True, - seed:int=909, - preprocessing_seed:int=None, - preprocessing_queue_image:ImagePreprocessor.PreprocessingQueue = None, - preprocessing_queue_mask:ImagePreprocessor.PreprocessingQueue = None, - + num_classes: int, + shuffle: bool = True, + batch_size: int = 2, + preprocessing_enabled: bool = True, + seed: int = 909, + preprocessing_seed: int = None, + preprocessing_queue_image: ImagePreprocessor.PreprocessingQueue = None, + preprocessing_queue_mask: ImagePreprocessor.PreprocessingQueue = None, ): """ Initializes the flow generator object, @@ -131,10 +130,13 @@ def __make_generator(self): target_size=self.output_size, color_mode="grayscale", ) - if self.preprocessing_queue_image == None and self.preprocessing_queue_mask == None: + if self.preprocessing_queue_image is None and self.preprocessing_queue_mask is None: #!Possibly in the wrong place as it has to be regenerated every time - self.preprocessing_queue_image, self.preprocessing_queue_mask = 
ImagePreprocessor.generate_default_queue() - elif self.preprocessing_queue_image == None or self.preprocessing_queue_mask == None: + ( + self.preprocessing_queue_image, + self.preprocessing_queue_mask, + ) = ImagePreprocessor.generate_default_queue() + elif self.preprocessing_queue_image is None or self.preprocessing_queue_mask is None: raise ValueError("Both queues must be passed or none") self.train_generator = zip(image_generator, mask_generator) @@ -169,7 +171,7 @@ def preprocess(self, generator_zip): """ for (img, mask) in generator_zip: if self.preprocessing_enabled: - for i in range(len(img)): + for i_image,i_mask in zip(img, mask): # random state for reproducibility if self.preprocessing_seed is None: image_seed = np.random.randint(0, 100000) @@ -177,9 +179,9 @@ def preprocess(self, generator_zip): state = np.random.RandomState(state) image_seed = state.randint(0, 100000) - img[i], mask[i] = ImagePreprocessor.augmentation_pipeline( - img[i], - mask[i], + i_image, i_mask = ImagePreprocessor.augmentation_pipeline( + i_image, + i_mask, self.image_size, self.output_size, self.output_reshape, From 65c7ba44d20960d01bf0ef0d84a1b2ec3bb50939 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sat, 18 Mar 2023 22:23:14 +0000 Subject: [PATCH 52/59] adds pylint configuration file --- .pylintrc | 790 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 790 insertions(+) create mode 100644 .pylintrc diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..bf5e2eb --- /dev/null +++ b/.pylintrc @@ -0,0 +1,790 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. 
+#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold to be exceeded before program exits with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore= + CVS, + migrations, + tests.py, + admin.py + + +# Add files or directories matching the regex patterns to the ignore-list. The +# regex matches against paths and can be in Posix or Windows format. +ignore-paths= + +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. 
The default value ignores Emacs file +# locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.9 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. 
You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable = all + +; disable=raw-checker-failed, +; bad-inline-option, +; locally-disabled, +; file-ignored, +; suppressed-message, +; useless-suppression, +; deprecated-pragma, +; use-symbolic-message-instead + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. + +enable = + blacklisted-name, + line-too-long, + + abstract-class-instantiated, + abstract-method, + access-member-before-definition, + anomalous-backslash-in-string, + anomalous-unicode-escape-in-string, + arguments-differ, + assert-on-tuple, + assigning-non-slot, + assignment-from-no-return, + assignment-from-none, + attribute-defined-outside-init, + bad-except-order, + bad-format-character, + bad-format-string-key, + bad-format-string, + bad-open-mode, + bad-reversed-sequence, + bad-staticmethod-argument, + bad-str-strip-call, + bad-super-call, + binary-op-exception, + boolean-datetime, + catching-non-exception, + cell-var-from-loop, + confusing-with-statement, + continue-in-finally, + dangerous-default-value, + duplicate-argument-name, + duplicate-bases, + duplicate-except, + duplicate-key, + expression-not-assigned, + format-combined-specification, + format-needs-mapping, + function-redefined, + global-variable-undefined, + import-error, + import-self, + inconsistent-mro, + inherit-non-class, + init-is-generator, + invalid-all-object, + invalid-format-index, + invalid-length-returned, + invalid-sequence-index, + invalid-slice-index, + invalid-slots-object, + invalid-slots, + invalid-unary-operand-type, + logging-too-few-args, + logging-too-many-args, + logging-unsupported-format, + lost-exception, + method-hidden, + misplaced-bare-raise, + misplaced-future, + missing-format-argument-key, + missing-format-attribute, + 
missing-format-string-key, + + no-method-argument, + no-name-in-module, + no-self-argument, + no-value-for-parameter, + non-iterator-returned, + nonexistent-operator, + not-a-mapping, + not-an-iterable, + not-callable, + not-context-manager, + not-in-loop, + pointless-statement, + pointless-string-statement, + raising-bad-type, + raising-non-exception, + redefined-builtin, + redefined-outer-name, + redundant-keyword-arg, + repeated-keyword, + return-arg-in-generator, + return-in-init, + return-outside-function, + signature-differs, + super-init-not-called, + syntax-error, + too-few-format-args, + too-many-format-args, + too-many-function-args, + truncated-format-string, + undefined-all-variable, + undefined-loop-variable, + undefined-variable, + unexpected-keyword-arg, + unexpected-special-method-signature, + unpacking-non-sequence, + unreachable, + unsubscriptable-object, + unsupported-binary-operation, + unsupported-membership-test, + unused-format-string-argument, + unused-format-string-key, + used-before-assignment, + using-constant-test, + yield-outside-function, + + astroid-error, + fatal, + method-check-failed, + parse-error, + raw-checker-failed, + + unused-argument, + unused-import, + unused-variable, + + eval-used, + exec-used, + + bad-classmethod-argument, + bad-mcs-classmethod-argument, + bad-mcs-method-argument, + bare-except, + broad-except, + consider-iterating-dictionary, + consider-using-enumerate, + global-at-module-level, + global-variable-not-assigned, + logging-format-interpolation, + logging-not-lazy, + multiple-imports, + multiple-statements, + no-classmethod-decorator, + no-staticmethod-decorator, + protected-access, + redundant-unittest-assert, + reimported, + simplifiable-if-statement, + singleton-comparison, + superfluous-parens, + unidiomatic-typecheck, + unnecessary-lambda, + unnecessary-pass, + unnecessary-semicolon, + unneeded-not, + useless-else-on-loop, + + deprecated-method, + deprecated-module, + + too-many-boolean-expressions, + 
too-many-nested-blocks, + + wildcard-import, + wrong-import-order, + wrong-import-position, + + missing-final-newline, + mixed-line-endings, + trailing-newlines, + trailing-whitespace, + unexpected-line-ending-format, + + bad-inline-option, + bad-option-value, + deprecated-pragma, + unrecognized-inline-option, + useless-suppression, + +; enable=c-extension-no-member + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=BaseException, + Exception + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the 'python-enchant' package. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. 
If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. 
+module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). 
+max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. 
+allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. 
+ignored-checks-for-mixins=no-member,
+    not-async-context-manager,
+    not-context-manager,
+    attribute-defined-outside-init
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+# Regex pattern to define which classes are considered mixins.
+mixin-class-rgx=.*[Mm]ixin
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators=
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never returns. When checking for
+# inconsistent-return-statements if a never returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=sys.exit,argparse.parse_error
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 From 18d88ee1626adc3cf5e078f091fcbacd1d32c60f Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 11:40:27 +0000 Subject: [PATCH 53/59] updates flowreader test for proper linting --- .../flow_reader_test.py | 44 +++++++++---------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/tests/segmentation_utils_tests.py/flow_reader_test.py b/tests/segmentation_utils_tests.py/flow_reader_test.py index 04b4695..5c918c6 100644 --- a/tests/segmentation_utils_tests.py/flow_reader_test.py +++ b/tests/segmentation_utils_tests.py/flow_reader_test.py @@ -35,47 +35,48 @@ def flow_from_directory_mock(*args, **kwargs): def test_makes_flow_generator() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, ) patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) # create a flow generator - flow_generator = FlowGenerator(**generator_args) - pass + FlowGenerator(**generator_args) def test_makes_flow_generator_with_queue() -> None: patch = MonkeyPatch() # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, ) 
patch.setattr(FlowGenerator, "preprocess", lambda self, x, *args, **kwargs: x) - + # create dummy queues - image_queue = ImagePreprocessor.PreprocessingQueue([lambda x, y, seed: x], [{"y": 1}]) - mask_queue = ImagePreprocessor.PreprocessingQueue([lambda x, y, seed: x], [{"y": 1}]) - + image_queue = ImagePreprocessor.PreprocessingQueue( + [lambda x, y, seed: x], [{"y": 1}] + ) + mask_queue = ImagePreprocessor.PreprocessingQueue( + [lambda x, y, seed: x], [{"y": 1}] + ) + # create a copy of the generator args new_generator_args = generator_args.copy() new_generator_args["preprocessing_queue_image"] = image_queue new_generator_args["preprocessing_queue_mask"] = mask_queue - - # create a flow generator - flow_generator = FlowGenerator(**new_generator_args) - + # create a flow generator + FlowGenerator(**new_generator_args) def test_makes_flow_generator_wrong_shape() -> None: try: patch = MonkeyPatch() # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, @@ -85,7 +86,7 @@ def test_makes_flow_generator_wrong_shape() -> None: fail_generator = generator_args.copy() # create a flow generator fail_generator["output_size"] = (256, 256, 256) - flow_generator = FlowGenerator(**fail_generator) + FlowGenerator(**fail_generator) assert False except ValueError: assert True @@ -138,7 +139,7 @@ def test_get_dataset_size() -> None: patch = MonkeyPatch() patch.setattr(os, "listdir", lambda x: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) # mock an imagedatagenerator from keras - mock_image_datagen = patch.setattr( + patch.setattr( ImageDataGenerator, "flow_from_directory", flow_from_directory_mock, @@ -186,20 +187,15 @@ def test_reader_error_raised() -> None: mask = np.zeros((256, 256, 1)) image = tf.convert_to_tensor(image) mask = tf.convert_to_tensor(mask) - - input_size = (512, 512) - output_size = (256, 256) - output_reshape = (256, 256) - seed = 0 + # # createing dummy queues 
image_queue = ImagePreprocessor.PreprocessingQueue( queue=[lambda x, y, seed: x], arguments=[{"y": 1}] ) - + new_generator_args = generator_args.copy() + new_generator_args["preprocessing_queue_image"] = image_queue # create a flow generator - flow_generator = FlowGenerator( - preprocessing_queue_image=image_queue, **generator_args - ) + FlowGenerator(**new_generator_args) assert False except ValueError: assert True From bb4c1cfd540308bf9ab8e529d2bd8cf643696f36 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 11:44:09 +0000 Subject: [PATCH 54/59] updates linting on image_preprocessor_test --- .../image_preprocessor_test.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/segmentation_utils_tests.py/image_preprocessor_test.py b/tests/segmentation_utils_tests.py/image_preprocessor_test.py index f6e1454..83c3e2f 100644 --- a/tests/segmentation_utils_tests.py/image_preprocessor_test.py +++ b/tests/segmentation_utils_tests.py/image_preprocessor_test.py @@ -1,11 +1,11 @@ -from utilities.segmentation_utils import ImagePreprocessor import numpy as np import tensorflow as tf +from utilities.segmentation_utils import ImagePreprocessor + def test_image_onehot_encoder() -> None: # predifining input variables - n_classes = 2 batch_size = 1 image_size = (512, 512) @@ -14,7 +14,6 @@ def test_image_onehot_encoder() -> None: # creating a mask with 2 classes mask = np.zeros((batch_size, output_size[0] * output_size[1])) mask[:, ::2] = 1 - # creating a onehot mask to compare with the output of the function onehot_test = np.zeros((batch_size, output_size[0] * output_size[1], n_classes)) @@ -41,7 +40,6 @@ def test_image_augmentation_pipeline_column() -> None: input_size = (512, 512) output_size = (256 * 256, 1) output_reshape = (256, 256) - seed = 0 # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( @@ -70,7 +68,6 @@ def test_image_augmentation_pipeline_squarematrix() -> None: input_size = (512, 512) 
output_size = (256, 256) - seed = 0 # createing dummy queues image_queue = ImagePreprocessor.PreprocessingQueue( @@ -100,9 +97,6 @@ def test_processing_queue() -> None: image_queue = ImagePreprocessor.PreprocessingQueue( queue=[lambda seed: seed], arguments=[dict(seed=1)] ) - mask_queue = ImagePreprocessor.PreprocessingQueue( - queue=[lambda seed: seed], arguments=[dict(seed=1)] - ) # changing the seed new_seed = 5 @@ -120,7 +114,6 @@ def test_generate_default_queue() -> None: image_queue.update_seed(new_seed) assert image_queue.arguments[0]["seed"] == new_seed - assert image_queue.get_queue_length() == 6 assert mask_queue.get_queue_length() == 2 From 6d80db91f096a50ee77bd5df3a99b7c56609969a Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 12:24:09 +0000 Subject: [PATCH 55/59] updates environment name of documentation environment --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 21341d9..113baae 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -54,7 +54,7 @@ jobs: deploy: needs: build environment: - name: github-pages + name: documentation url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: From a211592c4739afc8ec3b9e68513dcc88e562a9c2 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 12:27:54 +0000 Subject: [PATCH 56/59] checkout changes --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 113baae..5d60b39 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -4,7 +4,7 @@ name: Deploy Documentation on Pages on: # Runs on pushes targeting the default branch push: - branches: ["main"] + branches: ["migrating_segmentation_utils"] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 
060a58c970358b98a599763a9653a4c8aec0aa97 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 12:34:13 +0000 Subject: [PATCH 57/59] update documentation deployment workflow to only trigger on main --- .github/workflows/static.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 5d60b39..113baae 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -4,7 +4,7 @@ name: Deploy Documentation on Pages on: # Runs on pushes targeting the default branch push: - branches: ["migrating_segmentation_utils"] + branches: ["main"] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 6e26011f2ef718cb8562f6d97b80800e9f21f207 Mon Sep 17 00:00:00 2001 From: Sajtospoga01 Date: Sun, 19 Mar 2023 13:49:57 +0000 Subject: [PATCH 58/59] test pipeline status badge --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f89c5eb..948c251 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +![example branch parameter](https://github.com/guorbit/utilities/actions/workflows/python-app.yml/badge.svg?branch=migrating_segmentation_utils) + Note before installation: None of these commands have been properly tested. Make sure you installed the package in a virtual environment. 
For installing the utilities repo as a package use the following commands in the terminal: From 34737744ed2954a4f03211d65c78d6c877c7959a Mon Sep 17 00:00:00 2001 From: Andras Bodrogai <81911031+Sajtospoga01@users.noreply.github.com> Date: Sun, 19 Mar 2023 13:53:35 +0000 Subject: [PATCH 59/59] Update python-app.yml --- .github/workflows/python-app.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 525d861..b42b801 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python -name: Python application +name: CI pipeline on: push: