Merge pull request #762 from mantidproject/revert_to_ctype_sharedarray
Dimitar Tasev committed Dec 16, 2020
2 parents 2fa6a6b + e0a4187 commit 3d84374
Showing 37 changed files with 247 additions and 603 deletions.
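
This merge reverts the data layer from the SharedArray package back to anonymous, ctypes-backed shared memory: the sharedarray conda dependency is dropped, the memory_filename plumbing and free_memory() are removed from Images, and pu.create_array(shape, dtype) is called without a shared-memory name throughout the loaders, operations and tests. The implementation of pu.create_array itself is not part of this diff; as a rough sketch of the technique the branch name refers to, a ctypes-backed allocation can look like this:

# Sketch only: illustrates a ctypes-backed shared array of the kind the
# branch name ("revert_to_ctype_sharedarray") refers to. The real
# pu.create_array lives in the project's parallel utility module and is
# not shown in this diff.
from multiprocessing import sharedctypes

import numpy as np


def create_ctype_shared_array(shape, dtype=np.float32) -> np.ndarray:
    """Allocate a numpy array backed by anonymous ctypes shared memory."""
    ctype = np.ctypeslib.as_ctypes_type(np.dtype(dtype))
    raw = sharedctypes.RawArray(ctype, int(np.prod(shape)))
    # frombuffer gives a numpy view over the shared buffer; forked worker
    # processes see the same memory without a named segment to clean up.
    return np.frombuffer(raw, dtype=dtype).reshape(shape)

Because the buffer is anonymous there is no named segment left on the system, which is presumably why the SharedArray clean-up code (free_memory(), the sa.list()/sa.delete() test fixtures) can be deleted across the diff below.
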
1 change: 0 additions & 1 deletion conda/meta.yaml
@@ -25,7 +25,6 @@ requirements:
- astropy
- scipy
- scikit-image=0.17.2
- sharedarray
- numpy
- tomopy=1.7.1=cuda*
- cudatoolkit=10.2*
40 changes: 7 additions & 33 deletions mantidimaging/core/data/images.py
@@ -24,8 +24,7 @@ def __init__(self,
filenames: Optional[List[str]] = None,
indices: Optional[Tuple[int, int, int]] = None,
metadata: Optional[Dict[str, Any]] = None,
sinograms: bool = False,
memory_filename: Optional[str] = None):
sinograms: bool = False):
"""
:param data: Images of the Sample/Projection data
@@ -42,7 +41,6 @@ def __init__(self,
self.metadata: Dict[str, Any] = deepcopy(metadata) if metadata else {}
self._is_sinograms = sinograms

self.memory_filename = memory_filename
self._proj180deg: Optional[Images] = None
self._log_file: Optional[IMATLogFile] = None
self._projection_angles: Optional[ProjectionAngles] = None
@@ -67,25 +65,6 @@ def __str__(self):
def count(self) -> int:
return len(self._filenames) if self._filenames else 0

def free_memory(self, delete_filename=True):
"""
Delete the memory file containing the data, and the references to it within this class.
The memory will not be freed until _all_ references to it are gone, so local variables
can safely keep a reference even after deletion. This is used in unit testing data
generation, and ROI normalisation.
:param delete_filename: Whether to reset the memory filename attribute.
Set this to False in cases where the data will be replaced with
data with a new shape (rebin, crop), but the memory filename
ought to remain the same.
"""
if self.memory_filename is not None:
pu.delete_shared_array(self.memory_filename)
if delete_filename:
self.memory_filename = None
self.data = None

@property
def filenames(self) -> Optional[List[str]]:
return self._filenames
@@ -133,8 +112,7 @@ def prepare(o):

def copy(self, flip_axes=False) -> 'Images':
shape = (self.data.shape[1], self.data.shape[0], self.data.shape[2]) if flip_axes else self.data.shape
data_name = pu.create_shared_name()
data_copy = pu.create_array(shape, self.data.dtype, data_name)
data_copy = pu.create_array(shape, self.data.dtype)
if flip_axes:
data_copy[:] = np.swapaxes(self.data, 0, 1)
else:
@@ -143,22 +121,19 @@ def copy(self, flip_axes=False) -> 'Images':
images = Images(data_copy,
indices=deepcopy(self.indices),
metadata=deepcopy(self.metadata),
sinograms=not self.is_sinograms if flip_axes else self.is_sinograms,
memory_filename=data_name)
sinograms=not self.is_sinograms if flip_axes else self.is_sinograms)
return images

def copy_roi(self, roi: SensibleROI):
shape = (self.data.shape[0], roi.height, roi.width)

data_name = pu.create_shared_name()
data_copy = pu.create_array(shape, self.data.dtype, data_name)
data_copy = pu.create_array(shape, self.data.dtype)
data_copy[:] = self.data[:, roi.top:roi.bottom, roi.left:roi.right]

images = Images(data_copy,
indices=deepcopy(self.indices),
metadata=deepcopy(self.metadata),
sinograms=self._is_sinograms,
memory_filename=data_name)
sinograms=self._is_sinograms)

mark_cropped(images, roi)
return images
@@ -259,9 +234,8 @@ def dtype(self):

@staticmethod
def create_empty_images(shape, dtype, metadata):
shared_name = pu.create_shared_name()
arr = pu.create_array(shape, dtype, shared_name)
return Images(arr, memory_filename=shared_name, metadata=metadata)
arr = pu.create_array(shape, dtype)
return Images(arr, metadata=metadata)

@property
def is_sinograms(self):
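
With memory_filename gone, constructing, copying and pre-allocating an Images stack is purely array-based. A minimal usage sketch of the surface changed above (the import path for the parallel utility module is assumed and not shown in this diff):

import numpy as np

from mantidimaging.core.data.images import Images
from mantidimaging.core.parallel import utility as pu  # assumed path

# allocate a 10-image stack; no shared-memory name is passed any more
arr = pu.create_array((10, 128, 128), np.float32)
images = Images(arr, metadata={})

flipped = images.copy(flip_axes=True)  # axes 0/1 swapped, sinograms flag toggled
empty = Images.create_empty_images((10, 128, 128), np.float32, {})
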
17 changes: 3 additions & 14 deletions mantidimaging/core/data/test/images_test.py
@@ -53,16 +53,8 @@ def test_record_parameters_in_metadata(self):
imgs.metadata[const.OPERATION_HISTORY][0].pop(const.TIMESTAMP)
self.assertEqual(imgs.metadata, expected)

def test_free_memory(self):
images = generate_images(automatic_free=False)
self.assertIsNotNone(images.memory_filename)
self.assertIsNotNone(images.data)
images.free_memory()
self.assertIsNone(images.memory_filename)
self.assertIsNone(images.data)

def test_copy(self):
images = generate_images(automatic_free=False)
images = generate_images()
images.record_operation("Test", "Display", 123)
self.assertFalse(images.is_sinograms)
copy = images.copy()
@@ -73,11 +65,10 @@ def test_copy_flip_axes(self):
copy.data[:] = 150

self.assertEqual(images.metadata, copy.metadata)
self.assertNotEqual(images.memory_filename, copy.memory_filename)
self.assertNotEqual(images, copy)

def test_copy_flip_axes(self):
images = generate_images(automatic_free=False)
images = generate_images()
images.record_operation("Test", "Display", 123)
self.assertFalse(images.is_sinograms)
copy = images.copy(flip_axes=True)
@@ -88,11 +79,10 @@ def test_copy_flip_axes(self):
copy.data[:] = 150

self.assertEqual(images.metadata, copy.metadata)
self.assertNotEqual(images.memory_filename, copy.memory_filename)
self.assertNotEqual(images.sinograms, copy)

def test_copy_roi(self):
images = generate_images(automatic_free=False)
images = generate_images()
images.record_operation("Test", "Display", 123)
self.assertFalse(images.is_sinograms)
cropped_copy = images.copy_roi(SensibleROI(0, 0, 5, 5))
@@ -107,7 +97,6 @@ def test_copy_roi(self):
cropped_copy.metadata[const.OPERATION_HISTORY].pop(-1)
# the two metadatas should now be equal again
self.assertEqual(images.metadata, cropped_copy.metadata)
self.assertNotEqual(images.memory_filename, cropped_copy.memory_filename)
self.assertNotEqual(images, cropped_copy)

def test_filenames_set(self):
18 changes: 4 additions & 14 deletions mantidimaging/core/gpu/test/gpu_test.py
@@ -24,16 +24,6 @@ class GPUTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(GPUTest, self).__init__(*args, **kwargs)

@staticmethod
def run_serial(data, size, mode):
"""
Run the median filter in serial.
"""
th.switch_mp_off()
cpu_result = MedianFilter.filter_func(data, size, mode)
th.switch_mp_on()
return cpu_result

@unittest.skipIf(GPU_NOT_AVAIL, reason=GPU_SKIP_REASON)
def test_numpy_pad_modes_match_scipy_median_modes(self):
"""
@@ -47,7 +37,7 @@ def test_numpy_pad_modes_match_scipy_median_modes(self):
images = th.generate_shared_array()

gpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=False)
cpu_result = self.run_serial(images.copy(), size, mode)
cpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=True)

npt.assert_almost_equal(gpu_result[0], cpu_result[0])

@@ -80,7 +70,7 @@ def test_gpu_result_matches_cpu_result_for_larger_images(self):
images = th.generate_shared_array(shape=(20, N, N))

gpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=False)
cpu_result = self.run_serial(images.copy(), size, mode)
cpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=True)

npt.assert_almost_equal(gpu_result, cpu_result)

@@ -95,7 +85,7 @@ def test_double_is_used_in_cuda_for_float_64_arrays(self):
images = th.generate_shared_array(dtype="float64")

gpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=False)
cpu_result = self.run_serial(images.copy(), size, mode)
cpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=True)

npt.assert_almost_equal(gpu_result, cpu_result)

@@ -115,7 +105,7 @@ def test_image_slicing_works(self):
images = th.generate_shared_array(shape=(n_images, N, N))

gpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=False)
cpu_result = self.run_serial(images.copy(), size, mode)
cpu_result = MedianFilter.filter_func(images.copy(), size, mode, force_cpu=True)

npt.assert_almost_equal(gpu_result, cpu_result)

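
The deleted run_serial helper toggled multiprocessing off around a plain CPU run; the tests now simply ask for the CPU path through the filter's existing force_cpu keyword. A hedged sketch of that style of dispatch (this is not the project's MedianFilter; the scipy-based body below is a stand-in):

import numpy as np
from scipy import ndimage


def gpu_available() -> bool:
    # Hypothetical stand-in; the real code checks for a CUDA-capable GPU.
    return False


def filter_func(data: np.ndarray, size: int, mode: str, force_cpu: bool = True) -> np.ndarray:
    """Dispatch a median filter to the CPU path, or the GPU path when allowed."""
    if force_cpu or not gpu_available():
        # CPU path: scipy median filter applied image by image
        return np.stack([ndimage.median_filter(img, size=size, mode=mode) for img in data])
    raise NotImplementedError("GPU path (CUDA kernel) not sketched here")

With an API shaped like this, the old pattern of switching multiprocessing off, filtering, and switching it back on collapses to a single filter_func(..., force_cpu=True) call, which is exactly the substitution made in the four tests above.
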
47 changes: 21 additions & 26 deletions mantidimaging/core/io/loader/img_loader.py
@@ -62,21 +62,18 @@ def execute(load_func,

# we load the flat and dark first, because if they fail we don't want to
# fail after we've loaded a big stack into memory
flat_before_data, flat_before_filenames, flat_before_mfname = il.load_data(flat_before_path)
flat_after_data, flat_after_filenames, flat_after_mfname = il.load_data(flat_after_path)
dark_before_data, dark_before_filenames, dark_before_mfname = il.load_data(dark_before_path)
dark_after_data, dark_after_filenames, dark_after_mfname = il.load_data(dark_after_path)
sample_data, sample_mfname = il.load_sample_data(chosen_input_filenames)

return Dataset(Images(sample_data, chosen_input_filenames, indices, memory_filename=sample_mfname),
flat_before=Images(flat_before_data, flat_before_filenames, memory_filename=flat_before_mfname)
if flat_before_data is not None else None,
flat_after=Images(flat_after_data, flat_after_filenames, memory_filename=flat_after_mfname)
if flat_after_data is not None else None,
dark_before=Images(dark_before_data, dark_before_filenames, memory_filename=dark_before_mfname)
if dark_before_data is not None else None,
dark_after=Images(dark_after_data, dark_after_filenames, memory_filename=dark_after_mfname)
if dark_after_data is not None else None)
flat_before_data, flat_before_filenames = il.load_data(flat_before_path)
flat_after_data, flat_after_filenames = il.load_data(flat_after_path)
dark_before_data, dark_before_filenames = il.load_data(dark_before_path)
dark_after_data, dark_after_filenames = il.load_data(dark_after_path)
sample_data = il.load_sample_data(chosen_input_filenames)

return Dataset(
Images(sample_data, chosen_input_filenames, indices),
flat_before=Images(flat_before_data, flat_before_filenames) if flat_before_data is not None else None,
flat_after=Images(flat_after_data, flat_after_filenames) if flat_after_data is not None else None,
dark_before=Images(dark_before_data, dark_before_filenames) if dark_before_data is not None else None,
dark_after=Images(dark_after_data, dark_after_filenames) if dark_after_data is not None else None)


class ImageLoader(object):
@@ -91,9 +88,8 @@ def __init__(self, load_func, img_format, img_shape, data_dtype, indices, progre
def load_sample_data(self, input_file_names):
# determine what the loaded data was
if len(self.img_shape) == 2:
memory_file_name = pu.create_shared_name(input_file_names[0])
# the loaded file was a single image
sample_data = self.load_files(input_file_names, memory_file_name), memory_file_name
sample_data = self.load_files(input_file_names)
elif len(self.img_shape) == 3:
# the loaded file was a file containing a stack of images
sample_data = stack_loader.execute(self.load_func,
@@ -107,15 +103,14 @@ def load_sample_data(self, input_file_names):

return sample_data

def load_data(self, file_path) -> Tuple[Optional[np.ndarray], Optional[List[str]], Optional[str]]:
def load_data(self, file_path) -> Tuple[Optional[np.ndarray], Optional[List[str]]]:
if file_path:
file_names = get_file_names(os.path.dirname(file_path), self.img_format, get_prefix(file_path))
memory_file_name = pu.create_shared_name(file_names[0])
return self.load_files(file_names, memory_file_name), file_names, memory_file_name
return None, None, None
return self.load_files(file_names), file_names
return None, None

def _do_files_load_seq(self, data, files, name):
progress = Progress.ensure_instance(self.progress, num_steps=len(files), task_name=f'Load {name}')
def _do_files_load_seq(self, data, files):
progress = Progress.ensure_instance(self.progress, num_steps=len(files), task_name='Loading')

with progress:
for idx, in_file in enumerate(files):
@@ -132,13 +127,13 @@ def _do_files_load_seq(self, data, files, name):

return data

def load_files(self, files, memory_name=None) -> np.ndarray:
def load_files(self, files) -> np.ndarray:
# Zeroing here to make sure that we can allocate the memory.
# If it's not possible better crash here than later.
num_images = len(files)
shape = (num_images, self.img_shape[0], self.img_shape[1])
data = pu.create_array(shape, self.data_dtype, memory_name)
return self._do_files_load_seq(data, files, memory_name)
data = pu.create_array(shape, self.data_dtype)
return self._do_files_load_seq(data, files)


def _get_data_average(data):
1 change: 0 additions & 1 deletion mantidimaging/core/io/loader/loader.py
@@ -91,7 +91,6 @@ def read_in_file_information(input_path,

# construct and return the new shape
shape = (len(input_file_names), ) + images.data[0].shape
images.free_memory()

fi = FileInformation(filenames=input_file_names, shape=shape, sinograms=images.is_sinograms)
return fi
2 changes: 1 addition & 1 deletion mantidimaging/core/io/loader/stack_loader.py
@@ -64,7 +64,7 @@ def execute(load_func, file_name, dtype, name, indices=None, progress=None):
new_data = new_data[indices[0]:indices[1]:indices[2]]

img_shape = new_data.shape
data = pu.create_array(img_shape, dtype=dtype, name=f"{file_name}-Stack")
data = pu.create_array(img_shape, dtype=dtype)

# we could just move with data[:] = new_data[:] but then we don't get
# loading bar information, and I doubt there's any performance gain
39 changes: 0 additions & 39 deletions mantidimaging/core/io/test/io_test.py
@@ -21,16 +21,6 @@ def __init__(self, *args, **kwargs):
# force silent outputs
initialise_logging()

@classmethod
def setUpClass(cls) -> None:
import SharedArray as sa
for arr in sa.list():
sa.delete(arr.name.decode("utf-8"))

def tearDown(self):
import SharedArray as sa
assert len(sa.list()) == 0

def assert_files_exist(self, base_name, file_format, stack=True, num_images=1, indices=None):

if not stack:
@@ -135,15 +125,6 @@ def do_preproc(self, img_format, loader_indices=None, expected_len=None, saver_i
expected_images.data = expected_images.data[loader_indices[0]:loader_indices[1]]

npt.assert_equal(loaded_images.data, expected_images.data)
loaded_images.free_memory()
if dataset.dark_before:
dataset.dark_before.free_memory()
if dataset.dark_after:
dataset.dark_after.free_memory()
if dataset.flat_before:
dataset.flat_before.free_memory()
if dataset.flat_after:
dataset.flat_after.free_memory()

def test_load_sample_flat_and_dark(self,
img_format='tiff',
@@ -221,16 +202,6 @@ def test_load_sample_flat_and_dark(self,
npt.assert_equal(dataset.flat_after.data, flat_after.data)
npt.assert_equal(dataset.dark_after.data, dark_after.data)

loaded_images.free_memory()
if dataset.dark_before:
dataset.dark_before.free_memory()
if dataset.flat_before:
dataset.flat_before.free_memory()
if dataset.dark_after:
dataset.dark_after.free_memory()
if dataset.flat_after:
dataset.flat_after.free_memory()

def test_metadata_round_trip(self):
# Create dummy image stack
sample = th.gen_img_shared_array_with_val(42.)
@@ -247,16 +218,6 @@ def test_metadata_round_trip(self):
# Ensure properties have been preserved
self.assertEqual(loaded_images.metadata, images.metadata)

loaded_images.free_memory()
if dataset.dark_before:
dataset.dark_before.free_memory()
if dataset.dark_after:
dataset.dark_after.free_memory()
if dataset.flat_before:
dataset.flat_before.free_memory()
if dataset.flat_after:
dataset.flat_after.free_memory()


if __name__ == '__main__':
unittest.main()
7 changes: 2 additions & 5 deletions mantidimaging/core/operations/crop_coords/crop_coords.py
@@ -65,10 +65,7 @@ def filter_func(images: Images,
raise ValueError("It seems the Region of Interest is outside of the current image dimensions.\n"
"This can happen on the image preview right after a previous Crop Coordinates.")

# allocate output first BEFORE freeing the original data,
# otherwise it's possible to free and then fail allocation for output
# at which point you're left with no data
output = pu.allocate_output(images, shape)
output = pu.create_array(shape, images.dtype)
images.data = execute_single(sample, region_of_interest, progress, out=output)

return images
@@ -116,4 +113,4 @@ def execute_single(data, roi, progress=None, out=None):

output = out[:] if out is not None else data[:]
output[:] = data[:, roi.top:roi.bottom, roi.left:roi.right]
return output
return output
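
With the shared-memory bookkeeping gone, Crop Coordinates simply allocates a fresh output array and copies the region of interest into it, so the old warning about allocating the output before freeing the original data no longer applies. A self-contained sketch of the same slicing step on a plain numpy stack, using the top:bottom, left:right convention from execute_single above:

import numpy as np

data = np.arange(3 * 8 * 8, dtype=np.float32).reshape(3, 8, 8)
left, top, right, bottom = 2, 1, 6, 5  # a 4-wide, 4-high region of interest

output = np.empty((data.shape[0], bottom - top, right - left), dtype=data.dtype)
output[:] = data[:, top:bottom, left:right]

assert output.shape == (3, 4, 4)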