Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
18 commits
Select commit Hold shift + click to select a range
1ea5efc
updates image preprocessor by removing squeezing from onehot encode m…
Sajtospoga01 May 23, 2023
6a37482
adds experimental implementation of the flowreader object changing th…
Sajtospoga01 May 23, 2023
3ee7736
updates flowreader experimental to run with training
Sajtospoga01 May 23, 2023
952ed85
specifies datatypes for the mask store array
Sajtospoga01 May 24, 2023
f913e50
removes uint declaration
Sajtospoga01 May 24, 2023
7cc5ae3
fixes possible indexing issue and changes default reader library from…
Sajtospoga01 May 25, 2023
39fd65b
adds bicubic interpolation to the image reader
Sajtospoga01 May 25, 2023
38e16a9
removes some unnecessary things
Sajtospoga01 May 25, 2023
10653a9
adds possible fix for image flips not being determinable
Sajtospoga01 May 25, 2023
0c58719
adds function to validate dataset
Sajtospoga01 May 25, 2023
bea1882
updates, flowreader and processing pipeline to be able to read images
Sajtospoga01 Jun 1, 2023
1068f61
Improves code quality, updates documentation
Sajtospoga01 Jun 4, 2023
4ee21bd
update flowgenerator so it reads weights, doesn't return them
Sajtospoga01 Jun 6, 2023
2eed73a
adds sorting to weight reader
Sajtospoga01 Jun 6, 2023
591c276
fixes dtype errors in image preprocessor casting, and fixes weight re…
Sajtospoga01 Jun 6, 2023
1dfaa9d
update tests to fit new changes, add pd to requirements
Sajtospoga01 Jun 26, 2023
fad0b63
add arguments to image cutting loop so parameters are changeable
Sajtospoga01 Jun 26, 2023
f3c59f2
in __len__ function added self.mini_batch
AyleenSohaib Jul 30, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ numpy==1.24.1
rasterio==1.3.6
Pillow==9.4.0
tqdm==4.64.1
pandas==1.5.1
8 changes: 4 additions & 4 deletions tests/segmentation_utils_tests.py/flow_reader_test.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os

import numpy as np
import pytest
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from pytest import MonkeyPatch
Expand Down Expand Up @@ -67,11 +68,10 @@ def test_makes_flow_generator_with_queue() -> None:

# create a copy of the generator args
new_generator_args = generator_args.copy()
new_generator_args["preprocessing_queue_image"] = image_queue
new_generator_args["preprocessing_queue_mask"] = mask_queue

# create a flow generator
FlowGenerator(**new_generator_args)
generator = FlowGenerator(**new_generator_args)
generator.set_preprocessing_pipeline(image_queue, mask_queue)


def test_makes_flow_generator_wrong_shape() -> None:
Expand Down Expand Up @@ -181,7 +181,7 @@ def test_get_generator() -> None:
patch.undo()
patch.undo()


@pytest.mark.skip(reason="Deprecated functionality")
def test_reader_error_raised() -> None:
try:
# predifining input variables
Expand Down
51 changes: 25 additions & 26 deletions tests/segmentation_utils_tests.py/image_preprocessor_test.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
import numpy as np
import pytest
import tensorflow as tf

from utilities.segmentation_utils import ImagePreprocessor


def test_image_onehot_encoder() -> None:
@pytest.mark.skip(reason="Deprecated functionality")
def test_image_onehot_encoder_column() -> None:
# predifining input variables
n_classes = 2
batch_size = 1
Expand All @@ -24,39 +26,37 @@ def test_image_onehot_encoder() -> None:

assert one_hot_image.shape == (
1,
image_size[0] // 2 * image_size[1] // 2,
output_size[0] * output_size[1],
n_classes,
)
assert np.array_equal(one_hot_image, onehot_test)


def test_image_augmentation_pipeline_column() -> None:
def test_image_onehot_encoder_squarematrix() -> None:
# predifining input variables
image = np.zeros((512, 512, 3))
mask = np.zeros((256 * 256, 1))
image = tf.convert_to_tensor(image)
mask = tf.convert_to_tensor(mask)
n_classes = 2
batch_size = 1
image_size = (512, 512)
output_size = (256, 256)

input_size = (512, 512)
output_size = (256 * 256, 1)
output_reshape = (256, 256)
# creating a mask with 2 classes
mask = np.zeros((batch_size, output_size[0], output_size[1]))
mask[:, ::2,:] = 1

# creating dummy queues
image_queue = ImagePreprocessor.PreprocessingQueue(
queue=[lambda x, y, seed: x], arguments=[{"y": 1}]
)
mask_queue = ImagePreprocessor.PreprocessingQueue(
queue=[lambda x, y, seed: x], arguments=[{"y": 1}]
)
# creating a onehot mask to compare with the output of the function
onehot_test = np.zeros((batch_size, output_size[0] , output_size[1], n_classes))
onehot_test[:, ::2, :,1] = 1
onehot_test[:, 1::2,:, 0] = 1

image_new, mask_new = ImagePreprocessor.augmentation_pipeline(
image, mask, input_size, output_size, image_queue, mask_queue,output_reshape
)
image_new = image_new.numpy()
mask_new = mask_new.numpy()
one_hot_image = ImagePreprocessor.onehot_encode(mask, output_size, n_classes)

assert np.array(image_new).shape == (512, 512, 3)
assert np.array(mask_new).shape == (256 * 256, 1, 1)
assert one_hot_image.shape == (
1,
output_size[0],
output_size[1],
n_classes,
)
assert np.array_equal(one_hot_image, onehot_test)


def test_image_augmentation_pipeline_squarematrix() -> None:
Expand Down Expand Up @@ -123,5 +123,4 @@ def test_flatten() -> None:
image = tf.convert_to_tensor(image)
image = ImagePreprocessor.flatten(image, (512, 512), 3)
image = image.numpy()
assert image.shape == (512 * 512, 1, 3)

assert image.shape == (512 * 512, 3)
112 changes: 112 additions & 0 deletions tests/segmentation_utils_tests.py/test_flowreader.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
import os

import numpy as np
import pytest
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from pytest import MonkeyPatch

from utilities.segmentation_utils import ImagePreprocessor
from utilities.segmentation_utils.flowreader import FlowGeneratorExperimental


def test_can_create_instance() -> None:
    """Smoke test: a FlowGeneratorExperimental can be constructed.

    ``os.listdir`` is mocked so no real files are needed; successful
    construction is the assertion.
    """
    patch = MonkeyPatch()
    # mock list directory
    patch.setattr(os, "listdir", lambda x: ["a", "b", "c"])

    # create generator instance (constructing without raising is the test)
    FlowGeneratorExperimental(
        image_path="tests/segmentation_utils_tests/flow_reader_test",
        mask_path="tests/segmentation_utils_tests/flow_reader_test",
        image_size=(512, 512),
        output_size=(512, 512),
        num_classes=7,
        channel_mask=[True, True, True],
    )
    # restore os.listdir so later tests see the real filesystem
    patch.undo()

def test_set_preprocessing_pipeline() -> None:
    """set_preprocessing_pipeline accepts an image queue and a mask queue.

    ``os.listdir`` is mocked so no real files are needed; the setter
    completing without raising is the assertion.
    """
    patch = MonkeyPatch()
    # mock list directory
    patch.setattr(os, "listdir", lambda x: ["a", "b", "c"])

    # create generator instance
    generator = FlowGeneratorExperimental(
        image_path="tests/segmentation_utils_tests/flow_reader_test",
        mask_path="tests/segmentation_utils_tests/flow_reader_test",
        image_size=(512, 512),
        output_size=(512, 512),
        num_classes=7,
        channel_mask=[True, True, True],
    )

    # empty queues are enough to exercise the setter
    image_queue = ImagePreprocessor.PreprocessingQueue(queue=[], arguments=[])
    mask_queue = ImagePreprocessor.PreprocessingQueue(queue=[], arguments=[])

    generator.set_preprocessing_pipeline(image_queue, mask_queue)
    # restore os.listdir so later tests see the real filesystem
    patch.undo()

def test_set_mini_batch_size() -> None:
    """set_mini_batch_size stores the given size in ``mini_batch``."""
    patch = MonkeyPatch()
    # mock list directory
    patch.setattr(os, "listdir", lambda x: ["a", "b", "c"])

    # create generator instance
    generator = FlowGeneratorExperimental(
        image_path="tests/segmentation_utils_tests/flow_reader_test",
        mask_path="tests/segmentation_utils_tests/flow_reader_test",
        image_size=(512, 512),
        output_size=(512, 512),
        num_classes=7,
        channel_mask=[True, True, True],
    )

    generator.set_mini_batch_size(2)
    # restore os.listdir so later tests see the real filesystem
    patch.undo()
    assert generator.mini_batch == 2

def test_set_mini_batch_size_too_large() -> None:
    """set_mini_batch_size raises ValueError when the size exceeds batch_size.

    The default batch size is smaller than 5 here — TODO confirm against
    FlowGeneratorExperimental's default.
    """
    patch = MonkeyPatch()
    # mock list directory
    patch.setattr(os, "listdir", lambda x: ["a", "b", "c"])

    # create generator instance
    generator = FlowGeneratorExperimental(
        image_path="tests/segmentation_utils_tests/flow_reader_test",
        mask_path="tests/segmentation_utils_tests/flow_reader_test",
        image_size=(512, 512),
        output_size=(512, 512),
        num_classes=7,
        channel_mask=[True, True, True],
    )
    with pytest.raises(ValueError) as exc_info:
        generator.set_mini_batch_size(5)

    # restore os.listdir so later tests see the real filesystem
    patch.undo()
    assert exc_info.value.args[0] == "The mini batch size cannot be larger than the batch size"


def test_set_mini_batch_size_not_devisable() -> None:
    """set_mini_batch_size raises ValueError when batch_size % size != 0."""
    # NOTE(review): "devisable" is a typo for "divisible"; the name is kept
    # so the test's identifier stays stable.
    patch = MonkeyPatch()
    # mock list directory
    patch.setattr(os, "listdir", lambda x: ["a", "b", "c"])

    # create generator instance with batch_size=3, which 2 does not divide
    generator = FlowGeneratorExperimental(
        image_path="tests/segmentation_utils_tests/flow_reader_test",
        mask_path="tests/segmentation_utils_tests/flow_reader_test",
        image_size=(512, 512),
        output_size=(512, 512),
        num_classes=7,
        channel_mask=[True, True, True],
        batch_size=3,
    )
    with pytest.raises(ValueError) as exc_info:
        generator.set_mini_batch_size(2)

    # restore os.listdir so later tests see the real filesystem
    patch.undo()
    assert exc_info.value.args[0] == "The batch size must be divisible by the mini batch size"

77 changes: 72 additions & 5 deletions utilities/segmentation_utils/ImagePreprocessor.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,21 @@
from dataclasses import dataclass
from typing import Callable, Dict, Optional
from typing import Callable, Dict, Optional, Protocol

import numpy as np
import tensorflow as tf


class PreprocessorInterface(Protocol):
    """Structural (duck-typed) interface for a preprocessing queue.

    Any object with a ``queue`` of callables, matching ``arguments``
    dicts, and the two methods below satisfies this Protocol.
    """

    # ordered transformation callables
    queue: list[Callable]
    # keyword arguments for each callable in `queue`, by position
    arguments: list[Dict]

    def update_seed(self, seed: int) -> None:
        ...

    def get_queue_length(self) -> int:
        ...


@dataclass
class PreprocessingQueue:
"""
Expand Down Expand Up @@ -96,9 +107,15 @@ def onehot_encode(masks, output_size, num_classes) -> tf.Tensor:
-------
:return tf.Tensor: Batch of one-hot encoded masks
"""
encoded = np.zeros((masks.shape[0], output_size[0] * output_size[1], num_classes))
#!TODO: add support for 1D masks
encoded = np.zeros((masks.shape[0], output_size[0], output_size[1], num_classes))
for i in range(num_classes):
encoded[:, :, i] = tf.squeeze((masks == i).astype(int))
mask = (masks == i).astype(float)
encoded[:, :, :, i] = mask
if output_size[1] == 1:
encoded = encoded.reshape(
(masks.shape[0], output_size[0] * output_size[1], num_classes)
)
encoded = tf.convert_to_tensor(encoded)
return encoded

Expand Down Expand Up @@ -152,7 +169,9 @@ def augmentation_pipeline(

# reshapes masks, such that transforamtions work properly
if output_reshape is not None and output_size[1] == 1:
mask = tf.reshape(mask, (output_reshape[0], output_reshape[1], 1))
mask = tf.reshape(mask, (output_reshape[0], output_reshape[1]))

mask = tf.expand_dims(mask, axis=-1)

image_queue.update_seed(seed)
mask_queue.update_seed(seed)
Expand All @@ -166,6 +185,12 @@ def augmentation_pipeline(
# flattens masks out to the correct output shape
if output_size[1] == 1:
mask = flatten(mask, output_size, channels=1)
else:
mask = tf.squeeze(mask, axis=-1)

mask = tf.convert_to_tensor(mask)
# image = tf.convert_to_tensor(tf.clip_by_value(image, 0, 1))

return image, mask


Expand All @@ -187,4 +212,46 @@ def flatten(image, input_size, channels=1) -> tf.Tensor:
:return tf.Tensor: flattened image
"""
# the 1 is required to preserve the shape similar to the original
return tf.reshape(image, (input_size[0] * input_size[1], 1, channels))
return tf.convert_to_tensor(tf.reshape(image, (input_size[0] * input_size[1], channels)))


def random_flip_up_down(image, seed=0) -> tf.Tensor:
    """
    Function that randomly flips an image up or down

    Parameters
    ----------
    :tf.Tensor image: image to be flipped

    Returns
    -------
    :return tf.Tensor: flipped image
    """

    # seeded RNG keeps the flip decision reproducible for a given seed
    rng = np.random.RandomState(seed)
    if not rng.choice([True, False]):
        return image
    return tf.convert_to_tensor(tf.image.flip_up_down(image))


def random_flip_left_right(image, seed=0) -> tf.Tensor:
    """
    Function that randomly flips an image left or right

    Parameters
    ----------
    :tf.Tensor image: image to be flipped

    Returns
    -------
    :return tf.Tensor: flipped image
    """

    # seeded RNG keeps the flip decision reproducible for a given seed
    rng = np.random.RandomState(seed)
    if not rng.choice([True, False]):
        return image
    return tf.convert_to_tensor(tf.image.flip_left_right(image))
Loading