From 9105a9aabe853f336b7e1ecd6b62b942491ca453 Mon Sep 17 00:00:00 2001
From: cakester
Date: Thu, 17 Jun 2021 15:42:06 -0400
Subject: [PATCH] call NumpyToTensor last (#818)

* call NumpyToTensor before NormalizeInstance

* fixing tests

* call NumpyToTensor last

* fix tests

* use new release of data_functional_test
---
 ivadomed/scripts/download_data.py               |  2 +-
 ivadomed/scripts/visualize_transforms.py        |  6 +++++-
 ivadomed/transforms.py                          | 17 +++++++++++++----
 testing/functional_tests/test_segment_volume.py |  3 ---
 testing/unit_tests/test_orientation.py          |  1 -
 testing/unit_tests/test_testing.py              |  1 -
 6 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/ivadomed/scripts/download_data.py b/ivadomed/scripts/download_data.py
index 8d3290f2b..1a47eb8e3 100644
--- a/ivadomed/scripts/download_data.py
+++ b/ivadomed/scripts/download_data.py
@@ -47,7 +47,7 @@
         "url": ["https://github.com/ivadomed/model_find_disc_t2/archive/r20200928.zip"],
         "description": "Intervertebral disc detection model trained on T2-weighted images."},
     "data_functional_testing": {
-        "url": ["https://github.com/ivadomed/data_functional_testing/archive/r20210204.zip"],
+        "url": ["https://github.com/ivadomed/data_functional_testing/archive/r20210617.zip"],
         "description": "Data used for functional testing in Ivadomed."
         }
diff --git a/ivadomed/scripts/visualize_transforms.py b/ivadomed/scripts/visualize_transforms.py
index 3a6d3bcf3..960e9f957 100644
--- a/ivadomed/scripts/visualize_transforms.py
+++ b/ivadomed/scripts/visualize_transforms.py
@@ -5,6 +5,7 @@
 import nibabel as nib
 import numpy as np
 import random
+import torch
 
 from ivadomed import config_manager as imed_config_manager
 from ivadomed.loader import utils as imed_loader_utils
@@ -158,7 +159,10 @@ def run_visualization(input, config, number, output, roi):
                 before = np.rot90(imed_maths.rescale_values_array(data[0], 0.0, 1.0))
             else:
                 before = after
-            after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
+            if isinstance(stack_im[0], torch.Tensor):
+                after = np.rot90(imed_maths.rescale_values_array(stack_im[0].numpy(), 0.0, 1.0))
+            else:
+                after = np.rot90(imed_maths.rescale_values_array(stack_im[0], 0.0, 1.0))
 
             # Plot
             imed_utils.plot_transformed_sample(before, after,
diff --git a/ivadomed/transforms.py b/ivadomed/transforms.py
index 08ac370e3..76325cdfa 100644
--- a/ivadomed/transforms.py
+++ b/ivadomed/transforms.py
@@ -142,13 +142,17 @@ def __init__(self, dict_transforms, requires_undo=False):
                           "gt": torchvision_transforms.Compose(list_tr_gt),
                           "roi": torchvision_transforms.Compose(list_tr_roi)}
 
-    def __call__(self, sample, metadata, data_type='im'):
+    def __call__(self, sample, metadata, data_type='im', preprocessing=False):
         if self.transform[data_type] is None or len(metadata) == 0:
             # In case self.transform[data_type] is None
             return None, None
         else:
             for tr in self.transform[data_type].transforms:
                 sample, metadata = tr(sample, metadata)
+
+            if not preprocessing:
+                numpy_to_tensor = NumpyToTensor()
+                sample, metadata = numpy_to_tensor(sample, metadata)
             return sample, metadata
 
@@ -172,6 +176,8 @@ def __call__(self, sample, metadata, data_type='gt'):
             # In case self.transforms.transform[data_type] is None
             return None, None
         else:
+            numpy_to_tensor = NumpyToTensor()
+            sample, metadata = numpy_to_tensor.undo_transform(sample, metadata)
             for tr in self.transforms.transform[data_type].transforms[::-1]:
                 sample, metadata = tr.undo_transform(sample, metadata)
             return sample, metadata
@@ -1067,17 +1073,20 @@ def apply_preprocessing_transforms(transforms, seg_pair,
                                    roi_pair=None):
     if roi_pair is not None:
         stack_roi, metadata_roi = transforms(sample=roi_pair["gt"],
                                              metadata=roi_pair['gt_metadata'],
-                                             data_type="roi")
+                                             data_type="roi",
+                                             preprocessing=True)
         metadata_input = imed_loader_utils.update_metadata(metadata_roi, metadata_input)
     # Run transforms on images
     stack_input, metadata_input = transforms(sample=seg_pair["input"],
                                              metadata=metadata_input,
-                                             data_type="im")
+                                             data_type="im",
+                                             preprocessing=True)
     # Run transforms on images
     metadata_gt = imed_loader_utils.update_metadata(metadata_input, seg_pair['gt_metadata'])
     stack_gt, metadata_gt = transforms(sample=seg_pair["gt"],
                                        metadata=metadata_gt,
-                                       data_type="gt")
+                                       data_type="gt",
+                                       preprocessing=True)
     seg_pair = {
         'input': stack_input,
diff --git a/testing/functional_tests/test_segment_volume.py b/testing/functional_tests/test_segment_volume.py
index 99a5a1c15..358f8d447 100644
--- a/testing/functional_tests/test_segment_volume.py
+++ b/testing/functional_tests/test_segment_volume.py
@@ -59,7 +59,6 @@ def test_segment_volume_2d(download_functional_test_files):
                 "applied_to": ["im", "gt"],
                 "dataset_type": ["training"]
             },
-            "NumpyToTensor": {},
             "NormalizeInstance": {"applied_to": ["im"]}
         },
         "postprocessing": {},
@@ -117,7 +116,6 @@ def test_segment_volume_2d_with_patches(download_functional_test_files, center_crop):
                 "applied_to": ["im", "gt"],
                 "dataset_type": ["training"]
             },
-            "NumpyToTensor": {},
             "NormalizeInstance": {"applied_to": ["im"]}
         },
         "postprocessing": {},
@@ -182,7 +180,6 @@ def test_segment_volume_3d(download_functional_test_files, center_crop):
                 "applied_to": ["im", "gt"],
                 "dataset_type": ["training"]
             },
-            "NumpyToTensor": {},
             "NormalizeInstance": {"applied_to": ["im"]}
         },
         "postprocessing": {},
diff --git a/testing/unit_tests/test_orientation.py b/testing/unit_tests/test_orientation.py
index 45776bba0..bac345f24 100644
--- a/testing/unit_tests/test_orientation.py
+++ b/testing/unit_tests/test_orientation.py
@@ -53,7 +53,6 @@ def test_image_orientation(download_data_testing_test_files, loader_parameters):
         {
             "size": [176, 128, 160]
         },
-        "NumpyToTensor": {},
         "NormalizeInstance": {"applied_to": ['im']}
     }
 
diff --git a/testing/unit_tests/test_testing.py b/testing/unit_tests/test_testing.py
index ab60ce59a..5f8e30c4b 100644
--- a/testing/unit_tests/test_testing.py
+++ b/testing/unit_tests/test_testing.py
@@ -36,7 +36,6 @@ def setup_function():
     "CenterCrop": {
         "size": [48, 48]
     },
-    "NumpyToTensor": {},
     "NormalizeInstance": {"applied_to": ["im"]}
 }])
 @pytest.mark.parametrize('test_lst', [['sub-unf01']])
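
In practice, this change means transformation configs drop the explicit "NumpyToTensor" entry: Compose.__call__ now appends the tensor conversion itself unless it is invoked with preprocessing=True, which apply_preprocessing_transforms passes so that preprocessing keeps operating on NumPy arrays. Below is a minimal usage sketch, not part of the patch; it assumes the Compose constructor accepts a transform-name to parameters dict like the test configs above, and the sample/metadata calls are left as comments because they need real loader data.

    # Sketch (illustrative): a transformation pipeline defined without an
    # explicit "NumpyToTensor" entry.
    from ivadomed import transforms as imed_transforms

    transform_dict = {
        "CenterCrop": {"size": [48, 48]},
        "NormalizeInstance": {"applied_to": ["im"]},
        # "NumpyToTensor" is intentionally absent: Compose appends it at call time.
    }
    composed = imed_transforms.Compose(transform_dict)

    # Training/inference path: the sample comes back as a torch.Tensor because
    # NumpyToTensor is applied last, after NormalizeInstance.
    # stack, metadata = composed(sample, metadata, data_type="im")

    # Preprocessing path: the sample stays a NumPy array so that later
    # training-time transforms can still operate on it.
    # stack, metadata = composed(sample, metadata, data_type="im", preprocessing=True)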