From 778d777c8d683ee5ce63f7d423b4a311b2c0ab47 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 2 Jul 2024 09:21:21 -0400 Subject: [PATCH 001/139] 10081 pipeline doc and sym updates --- .../experimental_abinitio_pipeline_10081.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/gallery/experiments/experimental_abinitio_pipeline_10081.py b/gallery/experiments/experimental_abinitio_pipeline_10081.py index be27bc6e43..0711f8cb4b 100644 --- a/gallery/experiments/experimental_abinitio_pipeline_10081.py +++ b/gallery/experiments/experimental_abinitio_pipeline_10081.py @@ -59,7 +59,11 @@ # Create a source object for the experimental images src = RelionSource( - starfile_in, pixel_size=pixel_size, max_rows=n_imgs, data_folder=data_folder + starfile_in, + pixel_size=pixel_size, + max_rows=n_imgs, + data_folder=data_folder, + symmetry_group="C4", ) # Downsample the images @@ -115,12 +119,13 @@ # Volume Reconstruction # ---------------------- # -# Using the oriented source, attempt to reconstruct a volume. -# Since this is a Cn symmetric molecule, as indicated by -# ``symmetry="C4"`` above, the ``avgs`` images set will be repeated -# for each of the 3 additional rotations during the back-projection -# step. This boosts the effective number of images used in the -# reconstruction from ``n_classes`` to ``4*n_classes``. +# Using the oriented source, attempt to reconstruct a volume. Since +# this is a Cn symmetric molecule, as specified by ``RelionSource(..., +# symmetry_group="C4, ...)"``, the ``symmetry_group`` source attribute +# will flow through the pipeline to ``avgs``. Then each image will be +# repeated for each of the 3 additional rotations during +# back-projection. This boosts the effective number of images used in +# the reconstruction from ``n_classes`` to ``4*n_classes``. logger.info("Begin Volume reconstruction") From eaa9437e97277cd3ae34c9922f75c8edef8d8f19 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 2 Jul 2024 09:22:00 -0400 Subject: [PATCH 002/139] CL sync c3c4 eps change, and numerical issue --- src/aspire/abinitio/commonline_c3_c4.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/aspire/abinitio/commonline_c3_c4.py b/src/aspire/abinitio/commonline_c3_c4.py index 8e9652258a..68469618dd 100644 --- a/src/aspire/abinitio/commonline_c3_c4.py +++ b/src/aspire/abinitio/commonline_c3_c4.py @@ -47,7 +47,7 @@ def __init__( n_theta=None, max_shift=0.15, shift_step=1, - epsilon=1e-3, + epsilon=1e-2, max_iters=1000, degree_res=1, seed=None, @@ -691,7 +691,8 @@ def _J_sync_power_method(self, vijs): ) while itr < max_iters and residual > epsilon: itr += 1 - vec_new = self._signs_times_v(vijs, vec) + # Note, this appears to need double precision for accuracy in the following division. 
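            # Descriptive aside (inferred from the surrounding routine): this loop is a
            # plain power iteration on the J-synchronization sign matrix. Each pass
            # applies the matrix-free product `_signs_times_v`, re-normalizes the
            # iterate, and takes the change between successive iterates as the
            # convergence residual; the signs of the converged leading eigenvector
            # indicate which relative rotations v_ij must be conjugated by
            # J = diag(1, 1, -1). Per the note above, the normalization that follows
            # needs double precision, hence the astype(np.float64) on the next line.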
+ vec_new = self._signs_times_v(vijs, vec).astype(np.float64, copy=False) vec_new = vec_new / norm(vec_new) residual = norm(vec_new - vec) vec = vec_new From 92b2e93ab4b5187ad3b7c0500bca003cd8ff375c Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 10 Jul 2024 08:34:25 -0400 Subject: [PATCH 003/139] fix doc " typo --- gallery/experiments/experimental_abinitio_pipeline_10081.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/experiments/experimental_abinitio_pipeline_10081.py b/gallery/experiments/experimental_abinitio_pipeline_10081.py index 0711f8cb4b..838b2c2d5a 100644 --- a/gallery/experiments/experimental_abinitio_pipeline_10081.py +++ b/gallery/experiments/experimental_abinitio_pipeline_10081.py @@ -121,7 +121,7 @@ # # Using the oriented source, attempt to reconstruct a volume. Since # this is a Cn symmetric molecule, as specified by ``RelionSource(..., -# symmetry_group="C4, ...)"``, the ``symmetry_group`` source attribute +# symmetry_group="C4", ...)``, the ``symmetry_group`` source attribute # will flow through the pipeline to ``avgs``. Then each image will be # repeated for each of the 3 additional rotations during # back-projection. This boosts the effective number of images used in From 01058cc25eca8c2b47272484f245e8f3d716d59d Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 10 Jul 2024 08:46:05 -0400 Subject: [PATCH 004/139] Log a diagnostic whether we are actually boosting anything --- src/aspire/image/image.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index ff2353d333..1cb5ece1ab 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -517,6 +517,8 @@ def backproject(self, rot_matrices, symmetry_group=None): # Get symmetry rotations from SymmetryGroup. symmetry_rots = SymmetryGroup.parse(symmetry_group, dtype=self.dtype).matrices + if len(symmetry_rots) > 1: + logger.info("Boosting with {len(symmetry_rots)} rotational symmetries.") # Compute Fourier transform of images. im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self._data))) / (L**2) From db844878f7955c842dea4452886d390ae8cab69c Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Mon, 8 Jul 2024 13:22:38 -0400 Subject: [PATCH 005/139] symmetry_group pass-through for ClassAvgSource. --- src/aspire/denoising/class_avg.py | 1 + tests/test_class_src.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/aspire/denoising/class_avg.py b/src/aspire/denoising/class_avg.py index 586e0a08a9..20c3694dbf 100644 --- a/src/aspire/denoising/class_avg.py +++ b/src/aspire/denoising/class_avg.py @@ -76,6 +76,7 @@ def __init__( L=self.averager.src.L, n=self.averager.src.n, dtype=self.averager.src.dtype, + symmetry_group=self.src.symmetry_group, ) # Any further operations should not mutate this instance. diff --git a/tests/test_class_src.py b/tests/test_class_src.py index 0c169621cd..e395aecc8e 100644 --- a/tests/test_class_src.py +++ b/tests/test_class_src.py @@ -128,7 +128,14 @@ def class_sim_fixture(dtype, img_size): # Note using a single volume via C=1 is critical to matching # alignment without the complexity of remapping via states etc. src = Simulation( - L=img_size, n=n, vols=v, offsets=0, amplitudes=1, C=1, angles=true_rots.angles + L=img_size, + n=n, + vols=v, + offsets=0, + amplitudes=1, + C=1, + angles=true_rots.angles, + symmetry_group="C4", # For testing symmetry_group pass-through. ) # Prefetch all the images src = src.cache() @@ -193,6 +200,9 @@ class averages. 
k = len(src2.class_indices) np.testing.assert_equal(src2.class_indices, test_src.class_indices[::3][:k]) + # Check symmetry_group pass-through. + assert test_src.symmetry_group == class_sim_fixture.symmetry_group + # Test the _HeapItem helper class def test_heap_helper(): From 2511f43d87c5c7c1de7e636af8872a60c003d80f Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Wed, 10 Jul 2024 13:38:22 -0400 Subject: [PATCH 006/139] Add symmetry_group to test_indexed_source. --- tests/test_indexed_source.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_indexed_source.py b/tests/test_indexed_source.py index 30a23ee16b..9ce35c7052 100644 --- a/tests/test_indexed_source.py +++ b/tests/test_indexed_source.py @@ -13,7 +13,7 @@ def sim_fixture(): """ Generate a very small simulation and slice it. """ - sim = Simulation(L=8, n=10, C=1) + sim = Simulation(L=8, n=10, C=1, symmetry_group="D3") sim2 = sim[0::2] # Slice the evens return sim, sim2 @@ -31,6 +31,9 @@ def test_remapping(sim_fixture): # Check meta is served correctly. assert np.all(sim.get_metadata(indices=sim2.index_map) == sim2.get_metadata()) + # Check symmetry_group pass-through. + assert sim.symmetry_group == sim2.symmetry_group + def test_repr(sim_fixture): sim, sim2 = sim_fixture From 2b63e39e4c8325d502d113c46a039cbc7a47c0a7 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 12 Jul 2024 09:53:15 -0400 Subject: [PATCH 007/139] missing f in log message --- src/aspire/image/image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 1cb5ece1ab..fd160d1644 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -518,7 +518,7 @@ def backproject(self, rot_matrices, symmetry_group=None): # Get symmetry rotations from SymmetryGroup. symmetry_rots = SymmetryGroup.parse(symmetry_group, dtype=self.dtype).matrices if len(symmetry_rots) > 1: - logger.info("Boosting with {len(symmetry_rots)} rotational symmetries.") + logger.info(f"Boosting with {len(symmetry_rots)} rotational symmetries.") # Compute Fourier transform of images. im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self._data))) / (L**2) From e92971e6e4a60d4dbcd5b1abef587dc3db19fcad Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 15 Jul 2024 09:41:35 -0400 Subject: [PATCH 008/139] use utest_tolerance for single precision run to run variability --- tests/test_covar2d_denoiser.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_covar2d_denoiser.py b/tests/test_covar2d_denoiser.py index a403a72109..7b4da5511e 100644 --- a/tests/test_covar2d_denoiser.py +++ b/tests/test_covar2d_denoiser.py @@ -6,6 +6,7 @@ from aspire.noise import WhiteNoiseAdder from aspire.operators import IdentityFilter, RadialCTFFilter from aspire.source import Simulation +from aspire.utils import utest_tolerance # TODO, parameterize these further. dtype = np.float32 @@ -89,7 +90,9 @@ def test_batched_rotcov2d_MSE(sim, basis): # Additionally test the `DenoisedSource` and lazy-eval-cache # of the cov2d estimator. 
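    # Note on the tolerance used in the assertion below: `utest_tolerance(src.dtype)`
    # supplies a dtype-appropriate absolute tolerance (roughly, looser for float32
    # than for float64), so single-precision runs do not fail on benign run-to-run
    # variability.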
src = DenoisedSource(sim, denoiser) - np.testing.assert_allclose(imgs_denoised, src.images[:], rtol=1e-05, atol=1e-08) + np.testing.assert_allclose( + imgs_denoised, src.images[:], rtol=1e-05, atol=utest_tolerance(src.dtype) + ) def test_source_mismatch(sim, basis): From 6e45cfc52fe45631a64fa0905eb0337bf4ed52a7 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 13 Jun 2024 13:41:07 -0400 Subject: [PATCH 009/139] added 2D projection stub --- src/aspire/image/image.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index fd160d1644..392b5fd15d 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -186,6 +186,9 @@ def __init__(self, data, dtype=None): self.__array_interface__ = self._data.__array_interface__ self.__array__ = self._data + def project(self, angles): + """docstring""" + @property def res(self): warn( From 5ac3cbc4c31854ea5d67372f8192c9c59acbf736 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Mon, 17 Jun 2024 11:57:06 -0400 Subject: [PATCH 010/139] initial test file add --- tests/test_sinogram.py | 1 + 1 file changed, 1 insertion(+) create mode 100644 tests/test_sinogram.py diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py new file mode 100644 index 0000000000..5871ed8eef --- /dev/null +++ b/tests/test_sinogram.py @@ -0,0 +1 @@ +import pytest From fa5f37bf2544f046297ea580b4ead1c9677af62d Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 18 Jun 2024 12:33:31 -0400 Subject: [PATCH 011/139] Stashing initial project with test placeholder --- src/aspire/image/image.py | 37 ++++++++++++++++++++++++++++++++++++- tests/test_sinogram.py | 22 ++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 392b5fd15d..dfa526082b 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -9,6 +9,7 @@ from scipy.linalg import lstsq import aspire.volume +import finufft from aspire.nufft import anufft from aspire.numeric import fft, xp from aspire.utils import FourierRingCorrelation, anorm, crop_pad_2d, grid_2d @@ -187,7 +188,41 @@ def __init__(self, data, dtype=None): self.__array__ = self._data def project(self, angles): - """docstring""" + """docstring + angles: radians + """ + n_points = self.resolution # number of points to sample on radial line in polar grid + + nufft_type=2 + eps=1e-8 + + n_trans = self.n_images + assert n_trans == 1 + + # 2-D grid + + y_idx = np.arange(-n_points / 2, n_points / 2) / n_points * 2 + + x_theta = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] + x_theta = np.pi * x_theta.flatten() + + y_theta = y_idx[:, np.newaxis] * np.cos(angles)[np.newaxis, :] + y_theta = np.pi * y_theta.flatten() + + # NUFFT + plan = finufft.Plan(nufft_type, (self.resolution, self.resolution), n_trans, eps) + plan.setpts(x_theta, y_theta) + + freqs = np.abs(np.pi * y_idx) + n_lines = len(angles) + + # compute the polar nufft + image_ft = plan.execute(self._data.astype(np.complex128)).reshape(n_points, n_lines) + + # compute the Radon transform (sinogram) + + image_rt = np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0).real + return image_rt @property def res(self): diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 5871ed8eef..8179e83788 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -1 +1,23 @@ import pytest +from skimage import data +from skimage.transform import radon +from aspire.image import Image +import numpy as np + 
+#Image.project and compare results to skimage.radon + +def test_image_project(): + image = Image(data.camera().astype(np.float64)) + ny = image.resolution + angles = np.linspace(0, 360, ny, endpoint=False) + rads = angles / 180 * np.pi + s = image.project(rads) + + # add reference skimage radon here + + + #compare s with reference + + + + From f6834b23b35d147a99af81b36135e56e4239d547 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 18 Jun 2024 12:35:17 -0400 Subject: [PATCH 012/139] Style Updates --- src/aspire/image/image.py | 26 +++++++++++++++++--------- tests/test_sinogram.py | 13 +++++-------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index dfa526082b..02185ff0a2 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -2,6 +2,7 @@ import os from warnings import catch_warnings, filterwarnings, simplefilter, warn +import finufft import matplotlib.pyplot as plt import mrcfile import numpy as np @@ -9,7 +10,6 @@ from scipy.linalg import lstsq import aspire.volume -import finufft from aspire.nufft import anufft from aspire.numeric import fft, xp from aspire.utils import FourierRingCorrelation, anorm, crop_pad_2d, grid_2d @@ -191,16 +191,18 @@ def project(self, angles): """docstring angles: radians """ - n_points = self.resolution # number of points to sample on radial line in polar grid + n_points = ( + self.resolution + ) # number of points to sample on radial line in polar grid + + nufft_type = 2 + eps = 1e-8 - nufft_type=2 - eps=1e-8 - n_trans = self.n_images assert n_trans == 1 # 2-D grid - + y_idx = np.arange(-n_points / 2, n_points / 2) / n_points * 2 x_theta = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] @@ -210,18 +212,24 @@ def project(self, angles): y_theta = np.pi * y_theta.flatten() # NUFFT - plan = finufft.Plan(nufft_type, (self.resolution, self.resolution), n_trans, eps) + plan = finufft.Plan( + nufft_type, (self.resolution, self.resolution), n_trans, eps + ) plan.setpts(x_theta, y_theta) freqs = np.abs(np.pi * y_idx) n_lines = len(angles) # compute the polar nufft - image_ft = plan.execute(self._data.astype(np.complex128)).reshape(n_points, n_lines) + image_ft = plan.execute(self._data.astype(np.complex128)).reshape( + n_points, n_lines + ) # compute the Radon transform (sinogram) - image_rt = np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0).real + image_rt = np.fft.fftshift( + np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0 + ).real return image_rt @property diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 8179e83788..d3b7d6fb3d 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -1,10 +1,12 @@ +import numpy as np import pytest from skimage import data from skimage.transform import radon + from aspire.image import Image -import numpy as np -#Image.project and compare results to skimage.radon +# Image.project and compare results to skimage.radon + def test_image_project(): image = Image(data.camera().astype(np.float64)) @@ -15,9 +17,4 @@ def test_image_project(): # add reference skimage radon here - - #compare s with reference - - - - + # compare s with reference From aef46194ba558ca01025fc9a14a5a932a68d18fc Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 21 Jun 2024 12:00:34 -0400 Subject: [PATCH 013/139] Pytest fixtures --- tests/test_sinogram.py | 62 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 6 deletions(-) diff --git a/tests/test_sinogram.py 
b/tests/test_sinogram.py index d3b7d6fb3d..cf095f649a 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -1,20 +1,70 @@ +import itertools + import numpy as np import pytest from skimage import data -from skimage.transform import radon +from skimage.transform import radon, resize from aspire.image import Image +from aspire.utils import grid_2d + +# parameter img_sizes: 511, 512 +IMG_SIZES = [ + 511, + 512, +] + +# parameter dtype: float32, float64 +DTYPES = [ + np.float32, + np.float64, +] + + +@pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}", scope="module") +def dtype(request): + """ + Dtypes for image. + """ + return request.param -# Image.project and compare results to skimage.radon +@pytest.fixture(params=IMG_SIZES, ids=lambda x: f"px={x}", scope="module") +def img_size(request): + """ + Image size. + """ + return request.param -def test_image_project(): - image = Image(data.camera().astype(np.float64)) - ny = image.resolution + +@pytest.fixture +def masked_image(dtype, img_size): + """ + Construct a masked image fixture that takes paramters + """ + g = grid_2d(img_size, normalized=True, shifted=True) + mask = g["r"] < 1 + + # add more logic to check the sizes and readjust accordingly + image = data.camera().astype(dtype) + image = image[:img_size, :img_size] + return Image(image * mask) + + +# Image.project and compare results to skimage.radon +def test_image_project(masked_image): + ny = masked_image.resolution angles = np.linspace(0, 360, ny, endpoint=False) rads = angles / 180 * np.pi - s = image.project(rads) + s = masked_image.project(rads) # add reference skimage radon here + n = masked_image._data[0] + print(s.shape) + print(n.shape) + reference_sinogram = radon(n, theta=angles) # compare s with reference + np.testing.assert_allclose(s, reference_sinogram, rtol=11, atol=1e-8) + + # create fixture called masked_image(img_size) -> return: masked image of size (grid generation goes in fixture) From df8953d9179b3a06940885449e511726cea189eb Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 21 Jun 2024 12:21:14 -0400 Subject: [PATCH 014/139] Cleanup --- src/aspire/image/image.py | 6 ++---- tests/test_sinogram.py | 8 ++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 02185ff0a2..40546aff1a 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -191,9 +191,8 @@ def project(self, angles): """docstring angles: radians """ - n_points = ( - self.resolution - ) # number of points to sample on radial line in polar grid + # number of points to sample on radial line in polar grid + n_points = self.resolution nufft_type = 2 eps = 1e-8 @@ -217,7 +216,6 @@ def project(self, angles): ) plan.setpts(x_theta, y_theta) - freqs = np.abs(np.pi * y_idx) n_lines = len(angles) # compute the polar nufft diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index cf095f649a..de65ce252c 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -1,9 +1,7 @@ -import itertools - import numpy as np import pytest from skimage import data -from skimage.transform import radon, resize +from skimage.transform import radon from aspire.image import Image from aspire.utils import grid_2d @@ -60,9 +58,7 @@ def test_image_project(masked_image): # add reference skimage radon here n = masked_image._data[0] - print(s.shape) - print(n.shape) - reference_sinogram = radon(n, theta=angles) + reference_sinogram = radon(n, theta=angles[::-1]) # compare s with reference 
np.testing.assert_allclose(s, reference_sinogram, rtol=11, atol=1e-8) From 9970e815fbf8fdbd8a1fd4990a13dc09e5b2bf72 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 21 Jun 2024 13:01:21 -0400 Subject: [PATCH 015/139] changed nufft call --- src/aspire/image/image.py | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 40546aff1a..2d771766b0 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -2,7 +2,6 @@ import os from warnings import catch_warnings, filterwarnings, simplefilter, warn -import finufft import matplotlib.pyplot as plt import mrcfile import numpy as np @@ -10,7 +9,7 @@ from scipy.linalg import lstsq import aspire.volume -from aspire.nufft import anufft +from aspire.nufft import anufft, nufft from aspire.numeric import fft, xp from aspire.utils import FourierRingCorrelation, anorm, crop_pad_2d, grid_2d from aspire.volume import SymmetryGroup @@ -194,37 +193,24 @@ def project(self, angles): # number of points to sample on radial line in polar grid n_points = self.resolution - nufft_type = 2 - eps = 1e-8 - n_trans = self.n_images assert n_trans == 1 # 2-D grid - y_idx = np.arange(-n_points / 2, n_points / 2) / n_points * 2 + pts = np.empty((2, n_points * len(angles)), dtype=self.dtype) x_theta = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] - x_theta = np.pi * x_theta.flatten() + pts[0] = np.pi * x_theta.flatten() y_theta = y_idx[:, np.newaxis] * np.cos(angles)[np.newaxis, :] - y_theta = np.pi * y_theta.flatten() + pts[1] = np.pi * y_theta.flatten() # NUFFT - plan = finufft.Plan( - nufft_type, (self.resolution, self.resolution), n_trans, eps - ) - plan.setpts(x_theta, y_theta) - - n_lines = len(angles) - # compute the polar nufft - image_ft = plan.execute(self._data.astype(np.complex128)).reshape( - n_points, n_lines - ) + image_ft = nufft(self._data, pts).reshape(n_points, n_points) # compute the Radon transform (sinogram) - image_rt = np.fft.fftshift( np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0 ).real From eafe89e7283c87edbf9a60e66386b6e5fca02960 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 21 Jun 2024 13:18:49 -0400 Subject: [PATCH 016/139] added stub for image stack line project marc to continue --- tests/test_sinogram.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index de65ce252c..020e8b58bf 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,6 +4,7 @@ from skimage.transform import radon from aspire.image import Image +from aspire.source import Simulation from aspire.utils import grid_2d # parameter img_sizes: 511, 512 @@ -64,3 +65,32 @@ def test_image_project(masked_image): np.testing.assert_allclose(s, reference_sinogram, rtol=11, atol=1e-8) # create fixture called masked_image(img_size) -> return: masked image of size (grid generation goes in fixture) + + +def test_multidim(): + """ + Test Image.project on stacks of images. 
+ """ + + L = 32 # pixels + n = 3 + + # Generate a mask + g = grid_2d(L, normalized=True, shifted=True) + mask = g["r"] < 1 + + # Generate a simulation + src = Simulation(n=n, L=L, C=1, dtype=np.float64) + imgs = src.images[:] + + # Generate line project angles + ang_degrees = np.linspace(0, 180, L) + ang_rads = ang_degrees * np.pi / 180.0 + + # Call the line projection method + s = imgs.project(ang_rads) + + # # Compare with sk + # res = np.empty((n,L,L)) + # for i,img in enumerate(imgs.asnumpy()): + # #res[i] = radon(img ...) From c078f84f976a06b00f4d06ad99fc3a522c0630a4 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Mon, 24 Jun 2024 12:03:41 -0400 Subject: [PATCH 017/139] Dimensional Test Fix --- src/aspire/image/image.py | 14 ++++++++------ tests/test_sinogram.py | 16 +++++++++------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 2d771766b0..ee0b46e3f0 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -194,7 +194,6 @@ def project(self, angles): n_points = self.resolution n_trans = self.n_images - assert n_trans == 1 # 2-D grid y_idx = np.arange(-n_points / 2, n_points / 2) / n_points * 2 @@ -207,13 +206,16 @@ def project(self, angles): pts[1] = np.pi * y_theta.flatten() # NUFFT - # compute the polar nufft - image_ft = nufft(self._data, pts).reshape(n_points, n_points) + # compute the polar nufft, create a + image_ft = nufft(self._data, pts).reshape(self.n_images, n_points, n_points) # compute the Radon transform (sinogram) - image_rt = np.fft.fftshift( - np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0 - ).real + image_rt = np.empty((self.n_images, n_points, n_points)) + for i in range(n_trans): + image_rt[i] = np.fft.fftshift( + np.fft.ifft(np.fft.ifftshift(image_ft[i], axes=0), axis=0), axes=0 + ).real + # previous code: image_rt = np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0).real return image_rt @property diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 020e8b58bf..b9b2f3912f 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -62,7 +62,7 @@ def test_image_project(masked_image): reference_sinogram = radon(n, theta=angles[::-1]) # compare s with reference - np.testing.assert_allclose(s, reference_sinogram, rtol=11, atol=1e-8) + np.testing.assert_allclose(s[0], reference_sinogram, rtol=11, atol=1e-8) # create fixture called masked_image(img_size) -> return: masked image of size (grid generation goes in fixture) @@ -72,7 +72,7 @@ def test_multidim(): Test Image.project on stacks of images. """ - L = 32 # pixels + L = 64 # pixels n = 3 # Generate a mask @@ -81,16 +81,18 @@ def test_multidim(): # Generate a simulation src = Simulation(n=n, L=L, C=1, dtype=np.float64) - imgs = src.images[:] + imgs = src.images[:] * mask # Generate line project angles - ang_degrees = np.linspace(0, 180, L) + ang_degrees = np.linspace(0, 180, L, endpoint=False) ang_rads = ang_degrees * np.pi / 180.0 # Call the line projection method s = imgs.project(ang_rads) # # Compare with sk - # res = np.empty((n,L,L)) - # for i,img in enumerate(imgs.asnumpy()): - # #res[i] = radon(img ...) 
+ res = np.empty((n, L, L)) + for i, img in enumerate(imgs._data): + res[i] = radon(img, theta=ang_rads[::-1]) + + np.testing.assert_allclose(s, res, rtol=12, atol=1e-8) From 49ecd4b7489538785b12444529435813273b1c38 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Mon, 24 Jun 2024 12:26:06 -0400 Subject: [PATCH 018/139] Multidim FFT --- src/aspire/image/image.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index ee0b46e3f0..c88a17e6a3 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -196,7 +196,7 @@ def project(self, angles): n_trans = self.n_images # 2-D grid - y_idx = np.arange(-n_points / 2, n_points / 2) / n_points * 2 + y_idx = np.arange(-n_points / 2, n_points / 2, dtype=self.dtype) / n_points * 2 pts = np.empty((2, n_points * len(angles)), dtype=self.dtype) x_theta = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] @@ -210,12 +210,10 @@ def project(self, angles): image_ft = nufft(self._data, pts).reshape(self.n_images, n_points, n_points) # compute the Radon transform (sinogram) - image_rt = np.empty((self.n_images, n_points, n_points)) - for i in range(n_trans): - image_rt[i] = np.fft.fftshift( - np.fft.ifft(np.fft.ifftshift(image_ft[i], axes=0), axis=0), axes=0 - ).real - # previous code: image_rt = np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(image_ft, axes=0), axis=0), axes=0).real + image_rt = np.fft.fftshift( + np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 1)), axes=(0, 1)), + axes=(0, 1), + ).real return image_rt @property From 399be2f17505dd728edf457cfdc50829f184cd3d Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 25 Jun 2024 11:43:25 -0400 Subject: [PATCH 019/139] Integrated stack reshape to project --- src/aspire/image/image.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index c88a17e6a3..f30c82ca3b 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -192,8 +192,7 @@ def project(self, angles): """ # number of points to sample on radial line in polar grid n_points = self.resolution - - n_trans = self.n_images + original_stack = self.stack_shape # 2-D grid y_idx = np.arange(-n_points / 2, n_points / 2, dtype=self.dtype) / n_points * 2 @@ -206,14 +205,17 @@ def project(self, angles): pts[1] = np.pi * y_theta.flatten() # NUFFT - # compute the polar nufft, create a - image_ft = nufft(self._data, pts).reshape(self.n_images, n_points, n_points) + # compute the polar nufft + image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( + self.n_images, n_points, n_points + ) # compute the Radon transform (sinogram) image_rt = np.fft.fftshift( np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 1)), axes=(0, 1)), axes=(0, 1), ).real + image_rt = image_rt.reshape(*original_stack, n_points, n_points) return image_rt @property From 7994ecd8580fc4e6170a0b561d19a07a9263b3d0 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Wed, 26 Jun 2024 15:46:26 -0400 Subject: [PATCH 020/139] Fleshed out Image Project Single and Multidim Tests --- tests/test_sinogram.py | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index b9b2f3912f..eb42b0a871 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -44,7 +44,6 @@ def masked_image(dtype, img_size): g = grid_2d(img_size, normalized=True, shifted=True) mask = g["r"] < 1 - # add more logic to check the sizes 
and readjust accordingly image = data.camera().astype(dtype) image = image[:img_size, :img_size] return Image(image * mask) @@ -52,6 +51,9 @@ def masked_image(dtype, img_size): # Image.project and compare results to skimage.radon def test_image_project(masked_image): + """ + TestImage.project on a single stack of images. Compares project method with skimage. + """ ny = masked_image.resolution angles = np.linspace(0, 360, ny, endpoint=False) rads = angles / 180 * np.pi @@ -62,9 +64,15 @@ def test_image_project(masked_image): reference_sinogram = radon(n, theta=angles[::-1]) # compare s with reference - np.testing.assert_allclose(s[0], reference_sinogram, rtol=11, atol=1e-8) + nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=0)) / np.linalg.norm( + reference_sinogram, axis=0 + ) + tol = 0.002 - # create fixture called masked_image(img_size) -> return: masked image of size (grid generation goes in fixture) + # odd image tolerance (stink) + if masked_image.resolution % 2 == 1: + tol = 0.02 + np.testing.assert_array_less(nrms, tol, "Error in test image") def test_multidim(): @@ -72,7 +80,7 @@ def test_multidim(): Test Image.project on stacks of images. """ - L = 64 # pixels + L = 512 # pixels n = 3 # Generate a mask @@ -84,15 +92,18 @@ def test_multidim(): imgs = src.images[:] * mask # Generate line project angles - ang_degrees = np.linspace(0, 180, L, endpoint=False) - ang_rads = ang_degrees * np.pi / 180.0 - - # Call the line projection method - s = imgs.project(ang_rads) + angles = np.linspace(0, 180, L, endpoint=False) + rads = angles / 180.0 * np.pi + s = imgs.project(rads) # # Compare with sk - res = np.empty((n, L, L)) + reference_sinograms = np.empty((n, L, L)) for i, img in enumerate(imgs._data): - res[i] = radon(img, theta=ang_rads[::-1]) - - np.testing.assert_allclose(s, res, rtol=12, atol=1e-8) + reference_sinograms[i] = radon(img, theta=angles[::-1]) + + # decrease tolerance as L goes up + for i in range(n): + nrms = np.sqrt( + np.mean((s[i] - reference_sinograms[i]) ** 2, axis=0) + ) / np.linalg.norm(reference_sinograms[i], axis=0) + np.testing.assert_array_less(nrms, 0.05, err_msg=f"Error in image {i}") From 59e4b2446425b50e89bae505d88602e7019c1236 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 27 Jun 2024 12:35:05 -0400 Subject: [PATCH 021/139] Fixed the grid issues yay --- src/aspire/image/image.py | 16 +++++++--------- tests/test_sinogram.py | 6 +----- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index f30c82ca3b..c38e57dcde 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -195,19 +195,17 @@ def project(self, angles): original_stack = self.stack_shape # 2-D grid - y_idx = np.arange(-n_points / 2, n_points / 2, dtype=self.dtype) / n_points * 2 - pts = np.empty((2, n_points * len(angles)), dtype=self.dtype) + y_idx = np.fft.fftshift(np.fft.fftfreq(n_points)) + pts = np.empty((2, n_points, len(angles)), dtype=self.dtype) - x_theta = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] - pts[0] = np.pi * x_theta.flatten() - - y_theta = y_idx[:, np.newaxis] * np.cos(angles)[np.newaxis, :] - pts[1] = np.pi * y_theta.flatten() + pts[0] = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] + pts[1] = y_idx[:, np.newaxis] * np.cos(angles)[np.newaxis, :] + pts = pts.reshape(2, n_points * len(angles)) * 2 * np.pi # NUFFT # compute the polar nufft image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( - self.n_images, n_points, n_points + self.n_images, n_points, 
len(angles) ) # compute the Radon transform (sinogram) @@ -215,7 +213,7 @@ def project(self, angles): np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 1)), axes=(0, 1)), axes=(0, 1), ).real - image_rt = image_rt.reshape(*original_stack, n_points, n_points) + image_rt = image_rt.reshape(*original_stack, n_points, len(angles)) return image_rt @property diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index eb42b0a871..efa2c4aa3d 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -55,7 +55,7 @@ def test_image_project(masked_image): TestImage.project on a single stack of images. Compares project method with skimage. """ ny = masked_image.resolution - angles = np.linspace(0, 360, ny, endpoint=False) + angles = np.linspace(0, 360, ny + 1, endpoint=False) rads = angles / 180 * np.pi s = masked_image.project(rads) @@ -68,10 +68,6 @@ def test_image_project(masked_image): reference_sinogram, axis=0 ) tol = 0.002 - - # odd image tolerance (stink) - if masked_image.resolution % 2 == 1: - tol = 0.02 np.testing.assert_array_less(nrms, tol, "Error in test image") From 5532c3b79326b95665db4a974287ff5ede803198 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 28 Jun 2024 11:52:09 -0400 Subject: [PATCH 022/139] Angle slow moving axis --- src/aspire/image/image.py | 14 +++++++------- tests/test_sinogram.py | 10 ++++++---- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index c38e57dcde..92aa780823 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -196,24 +196,24 @@ def project(self, angles): # 2-D grid y_idx = np.fft.fftshift(np.fft.fftfreq(n_points)) - pts = np.empty((2, n_points, len(angles)), dtype=self.dtype) + pts = np.empty((2, len(angles), n_points), dtype=self.dtype) - pts[0] = y_idx[:, np.newaxis] * np.sin(angles)[np.newaxis, :] - pts[1] = y_idx[:, np.newaxis] * np.cos(angles)[np.newaxis, :] + pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] + pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] pts = pts.reshape(2, n_points * len(angles)) * 2 * np.pi # NUFFT # compute the polar nufft image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( - self.n_images, n_points, len(angles) + self.n_images, len(angles), n_points ) # compute the Radon transform (sinogram) image_rt = np.fft.fftshift( - np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 1)), axes=(0, 1)), - axes=(0, 1), + np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 2)), axes=(0, 2)), + axes=(0, 2), ).real - image_rt = image_rt.reshape(*original_stack, n_points, len(angles)) + image_rt = image_rt.reshape(*original_stack, len(angles), n_points) return image_rt @property diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index efa2c4aa3d..2abe42f12e 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -58,14 +58,16 @@ def test_image_project(masked_image): angles = np.linspace(0, 360, ny + 1, endpoint=False) rads = angles / 180 * np.pi s = masked_image.project(rads) + assert s.shape == (1, len(angles), ny) # add reference skimage radon here n = masked_image._data[0] - reference_sinogram = radon(n, theta=angles[::-1]) - + reference_sinogram = radon(n, theta=angles[::-1]).T # transpose angles, points + assert reference_sinogram.shape == (len(angles), ny) # compare s with reference - nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=0)) / np.linalg.norm( - reference_sinogram, axis=0 + + nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=1)) / 
np.linalg.norm( + reference_sinogram, axis=1 ) tol = 0.002 np.testing.assert_array_less(nrms, tol, "Error in test image") From 6620a2e7108e31f4eaa5acec5547df5ed0cd4bcd Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 28 Jun 2024 15:50:29 -0400 Subject: [PATCH 023/139] Replaced FFT with rfft --- src/aspire/image/image.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 92aa780823..e12cf29598 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import mrcfile import numpy as np +from numpy.fft import irfft from PIL import Image as PILImage from scipy.linalg import lstsq @@ -195,24 +196,27 @@ def project(self, angles): original_stack = self.stack_shape # 2-D grid - y_idx = np.fft.fftshift(np.fft.fftfreq(n_points)) - pts = np.empty((2, len(angles), n_points), dtype=self.dtype) + y_idx = np.fft.rfftfreq(n_points) * np.pi * 2 + n_real_points = len(y_idx) + + # y_idx = np.fft.fftshift(np.fft.fftfreq(n_points)) + pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - pts = pts.reshape(2, n_points * len(angles)) * 2 * np.pi + pts = pts.reshape(2, n_real_points * len(angles)) # NUFFT # compute the polar nufft image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( - self.n_images, len(angles), n_points + self.n_images, len(angles), n_real_points ) # compute the Radon transform (sinogram) image_rt = np.fft.fftshift( - np.fft.ifftn(np.fft.ifftshift(image_ft, axes=(0, 2)), axes=(0, 2)), + np.fft.irfftn(image_ft, s=(self.n_images, n_points), axes=(0, 2)), axes=(0, 2), - ).real + ) image_rt = image_rt.reshape(*original_stack, len(angles), n_points) return image_rt From 223065f58c0ed957b9f439297b7f51595574c5be Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 28 Jun 2024 16:00:17 -0400 Subject: [PATCH 024/139] Cleaned up other unit tests --- src/aspire/image/image.py | 1 - tests/test_sinogram.py | 12 +++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index e12cf29598..f0ef24dcc7 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -5,7 +5,6 @@ import matplotlib.pyplot as plt import mrcfile import numpy as np -from numpy.fft import irfft from PIL import Image as PILImage from scipy.linalg import lstsq diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 2abe42f12e..fc8e712408 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,7 +4,6 @@ from skimage.transform import radon from aspire.image import Image -from aspire.source import Simulation from aspire.utils import grid_2d # parameter img_sizes: 511, 512 @@ -85,9 +84,8 @@ def test_multidim(): g = grid_2d(L, normalized=True, shifted=True) mask = g["r"] < 1 - # Generate a simulation - src = Simulation(n=n, L=L, C=1, dtype=np.float64) - imgs = src.images[:] * mask + # Generate images + imgs = Image(np.random.random((n, L, L))) * mask # Generate line project angles angles = np.linspace(0, 180, L, endpoint=False) @@ -97,11 +95,11 @@ def test_multidim(): # # Compare with sk reference_sinograms = np.empty((n, L, L)) for i, img in enumerate(imgs._data): - reference_sinograms[i] = radon(img, theta=angles[::-1]) + reference_sinograms[i] = radon(img, theta=angles[::-1]).T # decrease tolerance as L goes up for i in range(n): nrms = 
np.sqrt( - np.mean((s[i] - reference_sinograms[i]) ** 2, axis=0) - ) / np.linalg.norm(reference_sinograms[i], axis=0) + np.mean((s[i] - reference_sinograms[i]) ** 2, axis=1) + ) / np.linalg.norm(reference_sinograms[i], axis=1) np.testing.assert_array_less(nrms, 0.05, err_msg=f"Error in image {i}") From cff9378770f095c1b77eaf11055cf4c433a002aa Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 5 Jul 2024 20:48:03 -0400 Subject: [PATCH 025/139] Added Doc Test and Cleaned up Code --- src/aspire/image/image.py | 15 ++++++++------- tests/test_sinogram.py | 17 ++++++++--------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index f0ef24dcc7..d5d1b9885e 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -187,8 +187,12 @@ def __init__(self, data, dtype=None): self.__array__ = self._data def project(self, angles): - """docstring - angles: radians + """ + Computes the Radon Transform on an Image Stack using Non-Uniform Fast Fourier Transforms. This method projects the Image stack along different angles and returns the Radon Transform. + + :param angles: A 1-D Numpy Array of angles in Radians. This is used to compute the Radon Transform at different angles. + :return: Radon transform of the Image Stack. + :rtype: Ndarray (stack size, number of angles, image resolution) """ # number of points to sample on radial line in polar grid n_points = self.resolution @@ -198,20 +202,17 @@ def project(self, angles): y_idx = np.fft.rfftfreq(n_points) * np.pi * 2 n_real_points = len(y_idx) - # y_idx = np.fft.fftshift(np.fft.fftfreq(n_points)) pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) - pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] pts = pts.reshape(2, n_real_points * len(angles)) - # NUFFT - # compute the polar nufft + # compute the polar nufft (NUFFT) image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( self.n_images, len(angles), n_real_points ) - # compute the Radon transform (sinogram) + # Radon transform image_rt = np.fft.fftshift( np.fft.irfftn(image_ft, s=(self.n_images, n_points), axes=(0, 2)), axes=(0, 2), diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index fc8e712408..a935f68ccc 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -38,7 +38,7 @@ def img_size(request): @pytest.fixture def masked_image(dtype, img_size): """ - Construct a masked image fixture that takes paramters + Creates a masked image fixture using camera data from Skikit-Image. """ g = grid_2d(img_size, normalized=True, shifted=True) mask = g["r"] < 1 @@ -51,7 +51,7 @@ def masked_image(dtype, img_size): # Image.project and compare results to skimage.radon def test_image_project(masked_image): """ - TestImage.project on a single stack of images. Compares project method with skimage. + Test Image.project on a single stack of images. Compares project method with skimage. 
""" ny = masked_image.resolution angles = np.linspace(0, 360, ny + 1, endpoint=False) @@ -59,22 +59,22 @@ def test_image_project(masked_image): s = masked_image.project(rads) assert s.shape == (1, len(angles), ny) - # add reference skimage radon here + # ski-kit image radon reference n = masked_image._data[0] reference_sinogram = radon(n, theta=angles[::-1]).T # transpose angles, points assert reference_sinogram.shape == (len(angles), ny) - # compare s with reference + # compare project method on ski-image reference nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=1)) / np.linalg.norm( reference_sinogram, axis=1 ) tol = 0.002 - np.testing.assert_array_less(nrms, tol, "Error in test image") + np.testing.assert_array_less(nrms, tol, "Error in image projections.") def test_multidim(): """ - Test Image.project on stacks of images. + Test Image.project on stacks of images. Extension of test_image_project but for multi-dimensional stacks. """ L = 512 # pixels @@ -92,14 +92,13 @@ def test_multidim(): rads = angles / 180.0 * np.pi s = imgs.project(rads) - # # Compare with sk + # Compare with ski-image reference_sinograms = np.empty((n, L, L)) for i, img in enumerate(imgs._data): reference_sinograms[i] = radon(img, theta=angles[::-1]).T - # decrease tolerance as L goes up for i in range(n): nrms = np.sqrt( np.mean((s[i] - reference_sinograms[i]) ** 2, axis=1) ) / np.linalg.norm(reference_sinograms[i], axis=1) - np.testing.assert_array_less(nrms, 0.05, err_msg=f"Error in image {i}") + np.testing.assert_array_less(nrms, 0.05, err_msg=f"Error in image {i}.") From 5192d54ca19b90d6662edc27e72cc0b5a4b8b5a6 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 10 Jul 2024 19:59:12 -0400 Subject: [PATCH 026/139] fixup sinogram tests and simpler multi test Co-authored-by: Marc Karimi --- tests/test_sinogram.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index a935f68ccc..323c1f2eb0 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -6,6 +6,10 @@ from aspire.image import Image from aspire.utils import grid_2d +# Relative tolerance comparing line projections to scikit +# The same tolerance will be used in all scikit comparisons +SK_TOL = 0.002 + # parameter img_sizes: 511, 512 IMG_SIZES = [ 511, @@ -59,17 +63,23 @@ def test_image_project(masked_image): s = masked_image.project(rads) assert s.shape == (1, len(angles), ny) - # ski-kit image radon reference - n = masked_image._data[0] - reference_sinogram = radon(n, theta=angles[::-1]).T # transpose angles, points - assert reference_sinogram.shape == (len(angles), ny) + # sci-kit image `radon` reference + # + # Note, Image.project's angles are wrt projection line (ie + # grid), while sk's radon are wrt the image. To correspond the + # rotations are inverted. This was the convention prefered by + # the original author of this method. 
+ # + # Note, transpose sk output to match (angles, points) + reference_sinogram = radon(masked_image._data[0], theta=angles[::-1]).T + assert reference_sinogram.shape == (len(angles), ny), "Incorrect Shape" # compare project method on ski-image reference nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=1)) / np.linalg.norm( reference_sinogram, axis=1 ) - tol = 0.002 - np.testing.assert_array_less(nrms, tol, "Error in image projections.") + + np.testing.assert_array_less(nrms, SK_TOL, "Error in image projections.") def test_multidim(): @@ -92,13 +102,20 @@ def test_multidim(): rads = angles / 180.0 * np.pi s = imgs.project(rads) - # Compare with ski-image + # Compare reference_sinograms = np.empty((n, L, L)) - for i, img in enumerate(imgs._data): - reference_sinograms[i] = radon(img, theta=angles[::-1]).T + for i, img in enumerate(imgs): + # Compute the singleton case, and compare with the stack + single_sinogram = img.project(rads) + # These should be allclose up to determinism in the FFT and NUFFT. + np.testing.assert_allclose(s[i : i + 1], single_sinogram) + + # Next individually compute sk's radon transform for each image. + reference_sinograms[i] = radon(img._data[0], theta=angles[::-1]).T + # Compare all lines in each sinogram with sk-image for i in range(n): nrms = np.sqrt( np.mean((s[i] - reference_sinograms[i]) ** 2, axis=1) ) / np.linalg.norm(reference_sinograms[i], axis=1) - np.testing.assert_array_less(nrms, 0.05, err_msg=f"Error in image {i}.") + np.testing.assert_array_less(nrms, SK_TOL, err_msg=f"Error in image {i}.") From b321364755fa48d43d776cdb20c1acd3e1b96edd Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 10 Jul 2024 20:07:57 -0400 Subject: [PATCH 027/139] fix irfft and shift Co-authored-by: Marc Karimi --- src/aspire/image/image.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index d5d1b9885e..dd0adefd13 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -213,10 +213,7 @@ def project(self, angles): ) # Radon transform - image_rt = np.fft.fftshift( - np.fft.irfftn(image_ft, s=(self.n_images, n_points), axes=(0, 2)), - axes=(0, 2), - ) + image_rt = np.fft.fftshift(np.fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) image_rt = image_rt.reshape(*original_stack, len(angles), n_points) return image_rt From 3d9d123d291474403de7dc5fc54bb6d4daf3069f Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 11 Jul 2024 12:49:52 -0400 Subject: [PATCH 028/139] added angles but need to change multidim --- src/aspire/image/image.py | 17 +++++++++-------- tests/test_sinogram.py | 14 ++++++++++---- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index dd0adefd13..f6e07dcdc6 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -199,22 +199,23 @@ def project(self, angles): original_stack = self.stack_shape # 2-D grid - y_idx = np.fft.rfftfreq(n_points) * np.pi * 2 - n_real_points = len(y_idx) + radial_idx = np.fft.rfftfreq(n_points) * np.pi * 2 + n_real_points = len(radial_idx) + n_angles = len(angles) - pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) - pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] - pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - pts = pts.reshape(2, n_real_points * len(angles)) + pts = np.empty((2, n_angles, n_real_points), dtype=self.dtype) + pts[0] = radial_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] 
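        # Descriptive aside: together pts[0]/pts[1] form the 2-D frequency
        # coordinates of a polar sampling grid -- `radial_idx` sweeps the
        # non-negative rFFT frequencies along a line, and sin/cos of each
        # projection angle rotates that line. E.g. for angles = [0, pi/2] the
        # first line lies along the pts[1] (cos) axis and the second along the
        # pts[0] (sin) axis. (The exact x/y convention is fixed by the NUFFT
        # call below.)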
+ pts[1] = radial_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] + pts = pts.reshape(2, n_real_points * n_angles) # compute the polar nufft (NUFFT) image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( - self.n_images, len(angles), n_real_points + self.n_images, n_angles, n_real_points ) # Radon transform image_rt = np.fft.fftshift(np.fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) - image_rt = image_rt.reshape(*original_stack, len(angles), n_points) + image_rt = image_rt.reshape(*original_stack, n_angles, n_points) return image_rt @property diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 323c1f2eb0..94cc172189 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -10,18 +10,17 @@ # The same tolerance will be used in all scikit comparisons SK_TOL = 0.002 -# parameter img_sizes: 511, 512 IMG_SIZES = [ 511, 512, ] -# parameter dtype: float32, float64 DTYPES = [ np.float32, np.float64, ] +ANGLES = [1, 50, 90, 117, 180, 360] @pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}", scope="module") def dtype(request): @@ -38,6 +37,13 @@ def img_size(request): """ return request.param +@pytest.fixture(params=ANGLES, ids=lambda x: f"angles={x}", scope="module") +def num_ang(request): + """ + Angles. + """ + return request.param + @pytest.fixture def masked_image(dtype, img_size): @@ -53,12 +59,12 @@ def masked_image(dtype, img_size): # Image.project and compare results to skimage.radon -def test_image_project(masked_image): +def test_image_project(masked_image, num_ang): """ Test Image.project on a single stack of images. Compares project method with skimage. """ ny = masked_image.resolution - angles = np.linspace(0, 360, ny + 1, endpoint=False) + angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180 * np.pi s = masked_image.project(rads) assert s.shape == (1, len(angles), ny) From d486dd2d269366fb48655138f6b4f091a9bc8948 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 11 Jul 2024 15:37:37 -0400 Subject: [PATCH 029/139] Added Changes from PR: parameterized angles, adjusted tests accordingly, renamed variables --- tests/test_sinogram.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 94cc172189..69fdd1da37 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -8,7 +8,7 @@ # Relative tolerance comparing line projections to scikit # The same tolerance will be used in all scikit comparisons -SK_TOL = 0.002 +SK_TOL = 0.005 IMG_SIZES = [ 511, @@ -22,6 +22,7 @@ ANGLES = [1, 50, 90, 117, 180, 360] + @pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}", scope="module") def dtype(request): """ @@ -37,6 +38,7 @@ def img_size(request): """ return request.param + @pytest.fixture(params=ANGLES, ids=lambda x: f"angles={x}", scope="module") def num_ang(request): """ @@ -88,7 +90,7 @@ def test_image_project(masked_image, num_ang): np.testing.assert_array_less(nrms, SK_TOL, "Error in image projections.") -def test_multidim(): +def test_multidim(num_ang): """ Test Image.project on stacks of images. Extension of test_image_project but for multi-dimensional stacks. 
""" @@ -104,12 +106,12 @@ def test_multidim(): imgs = Image(np.random.random((n, L, L))) * mask # Generate line project angles - angles = np.linspace(0, 180, L, endpoint=False) + angles = np.linspace(0, 180, num_ang, endpoint=False) rads = angles / 180.0 * np.pi s = imgs.project(rads) # Compare - reference_sinograms = np.empty((n, L, L)) + reference_sinograms = np.empty((n, num_ang, L)) for i, img in enumerate(imgs): # Compute the singleton case, and compare with the stack single_sinogram = img.project(rads) From 614f08ddae28494637bf8648555966b973f57977 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 18 Jul 2024 00:46:02 -0400 Subject: [PATCH 030/139] Added extra comments + Integrated Changes from lineproject_dbg2 branch --- src/aspire/image/image.py | 2 +- tests/test_sinogram.py | 57 ++++++++++++++++++++++----------------- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index f6e07dcdc6..613f393993 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -213,7 +213,7 @@ def project(self, angles): self.n_images, n_angles, n_real_points ) - # Radon transform + # Radon transform, output: (stack size, angles, points) image_rt = np.fft.fftshift(np.fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) image_rt = image_rt.reshape(*original_stack, n_angles, n_points) return image_rt diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 69fdd1da37..4dee693351 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -20,7 +20,14 @@ np.float64, ] -ANGLES = [1, 50, 90, 117, 180, 360] +ANGLES = [ + 1, + 50, + pytest.param(90, marks=pytest.mark.expensive), + pytest.param(117, marks=pytest.mark.expensive), + pytest.param(180, marks=pytest.mark.expensive), + pytest.param(360, marks=pytest.mark.expensive), +] @pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}", scope="module") @@ -42,7 +49,7 @@ def img_size(request): @pytest.fixture(params=ANGLES, ids=lambda x: f"angles={x}", scope="module") def num_ang(request): """ - Angles. + Angles (Degrees). """ return request.param @@ -50,7 +57,7 @@ def num_ang(request): @pytest.fixture def masked_image(dtype, img_size): """ - Creates a masked image fixture using camera data from Skikit-Image. + Creates a masked image fixture using camera data from Scikit-Image. """ g = grid_2d(img_size, normalized=True, shifted=True) mask = g["r"] < 1 @@ -63,7 +70,7 @@ def masked_image(dtype, img_size): # Image.project and compare results to skimage.radon def test_image_project(masked_image, num_ang): """ - Test Image.project on a single stack of images. Compares project method with skimage. + Test Image.project on a single stack of images. Compares project method output with skimage project. 
""" ny = masked_image.resolution angles = np.linspace(0, 360, num_ang, endpoint=False) @@ -83,8 +90,8 @@ def test_image_project(masked_image, num_ang): assert reference_sinogram.shape == (len(angles), ny), "Incorrect Shape" # compare project method on ski-image reference - nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=1)) / np.linalg.norm( - reference_sinogram, axis=1 + nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=-1)) / np.linalg.norm( + reference_sinogram, axis=-1 ) np.testing.assert_array_less(nrms, SK_TOL, "Error in image projections.") @@ -97,33 +104,35 @@ def test_multidim(num_ang): L = 512 # pixels n = 3 + m = 2 # Generate a mask g = grid_2d(L, normalized=True, shifted=True) mask = g["r"] < 1 # Generate images - imgs = Image(np.random.random((n, L, L))) * mask + imgs = Image(np.random.random((m, n, L, L))) * mask # Generate line project angles - angles = np.linspace(0, 180, num_ang, endpoint=False) + angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180.0 * np.pi s = imgs.project(rads) # Compare - reference_sinograms = np.empty((n, num_ang, L)) - for i, img in enumerate(imgs): - # Compute the singleton case, and compare with the stack - single_sinogram = img.project(rads) - # These should be allclose up to determinism in the FFT and NUFFT. - np.testing.assert_allclose(s[i : i + 1], single_sinogram) - - # Next individually compute sk's radon transform for each image. - reference_sinograms[i] = radon(img._data[0], theta=angles[::-1]).T - - # Compare all lines in each sinogram with sk-image - for i in range(n): - nrms = np.sqrt( - np.mean((s[i] - reference_sinograms[i]) ** 2, axis=1) - ) / np.linalg.norm(reference_sinograms[i], axis=1) - np.testing.assert_array_less(nrms, SK_TOL, err_msg=f"Error in image {i}.") + reference_sinograms = np.empty((m, n, num_ang, L)) + for i in range(m): + for j in range(n): + img = imgs[i, j] + # Compute the singleton case, and compare with stack. + single_sinogram = img.project(rads) + + # These should be allclose up to determinism in the FFT and NUFFT. + np.testing.assert_allclose(s[i, j : j + 1], single_sinogram) + + # Next individually compute sk's radon transform for each image. + reference_sinograms[i, j] = radon(img._data[0], theta=angles[::-1]).T + + _nrms = np.sqrt(np.mean((s - reference_sinograms) ** 2, axis=-1)) / np.linalg.norm( + reference_sinograms, axis=-1 + ) + np.testing.assert_array_less(_nrms, SK_TOL, "Error in image projections.") From 2b2e01901c444ef1231848dc5fc07ebb57294d11 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 19 Jul 2024 04:10:13 -0400 Subject: [PATCH 031/139] Changed angle fixture description + Id Name --- tests/test_sinogram.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 4dee693351..56aa6776e0 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -46,10 +46,10 @@ def img_size(request): return request.param -@pytest.fixture(params=ANGLES, ids=lambda x: f"angles={x}", scope="module") +@pytest.fixture(params=ANGLES, ids=lambda x: f"n_angles={x}", scope="module") def num_ang(request): """ - Angles (Degrees). + Number of angles in radon transform. 
""" return request.param From fa8bd54785040fa0d243344be02b6bbd2357b275 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 19 Jul 2024 08:01:36 -0400 Subject: [PATCH 032/139] Docstring len cleanup --- src/aspire/image/image.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 613f393993..20d998afe6 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -188,9 +188,13 @@ def __init__(self, data, dtype=None): def project(self, angles): """ - Computes the Radon Transform on an Image Stack using Non-Uniform Fast Fourier Transforms. This method projects the Image stack along different angles and returns the Radon Transform. + Computes the Radon Transform on an Image Stack using + Non-Uniform Fast Fourier Transforms. This method projects the + Image stack along different angles and returns the Radon + Transform. - :param angles: A 1-D Numpy Array of angles in Radians. This is used to compute the Radon Transform at different angles. + :param angles: A 1-D Numpy Array of angles in Radians. + This is used to compute the Radon Transform at different angles. :return: Radon transform of the Image Stack. :rtype: Ndarray (stack size, number of angles, image resolution) """ From 2d5a459b1c1d59fc140aa521e44e94457acfae77 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 9 May 2024 10:21:18 -0400 Subject: [PATCH 033/139] add simple basis benchmark and plotting script [skip ci] --- bbenchmark/bbenchmark.py | 57 ++++++++++++++++++++++++++++++ bbenchmark/benchmark_gpu0.pkl | Bin 0 -> 307 bytes bbenchmark/benchmark_host.pkl | Bin 0 -> 307 bytes bbenchmark/plot_bb.py | 64 ++++++++++++++++++++++++++++++++++ 4 files changed, 121 insertions(+) create mode 100644 bbenchmark/bbenchmark.py create mode 100644 bbenchmark/benchmark_gpu0.pkl create mode 100644 bbenchmark/benchmark_host.pkl create mode 100644 bbenchmark/plot_bb.py diff --git a/bbenchmark/bbenchmark.py b/bbenchmark/bbenchmark.py new file mode 100644 index 0000000000..6c6320c101 --- /dev/null +++ b/bbenchmark/bbenchmark.py @@ -0,0 +1,57 @@ +import os +import pickle +from pprint import pprint +from time import perf_counter, time + +import matplotlib.pyplot as plt +import numpy as np +from aspire.basis import FFBBasis2D, FLEBasis2D +from aspire.downloader import emdb_2660 +from aspire.noise import WhiteNoiseAdder +from aspire.source import ArrayImageSource, Simulation + +# Download and cache volume map +vol = emdb_2660().astype(np.float64) # doubles +cached_image_fn = "simulated_images.npy" + +if os.path.exists(cached_image_fn): + print(f"Loading cached image source from {cached_image_fn}.") + sim = ArrayImageSource(np.load(cached_image_fn)) +else: + print("Generating Simulated Datatset") + sim = Simulation( + n=512, C=1, vols=vol, noise_adder=WhiteNoiseAdder.from_snr(0.1) + ).cache() + print(f"Saving to {cached_image_fn}") + np.save(cached_image_fn, sim.images[:].asnumpy()) + + +TIMES = {} +for L in [32, 64, 128, 256]: + print(f"Begin L={L}") + src = sim.downsample(L) + imgs = src.images[:] + TIMES[L] = {} + for basis_type in [FFBBasis2D, FLEBasis2D]: + # Construct basis + TIMES[L][basis_type.__name__] = {} + basis = basis_type(L, dtype=src.dtype) + + # Time expanding into basis + tic = perf_counter() + coef = basis.evaluate_t(imgs) + toc = perf_counter() + TIMES[L][basis_type.__name__]["evaluate_t"] = toc - tic + + # Time expanding back into images + tic = perf_counter() + _ = coef.evaluate() + toc = perf_counter() + 
TIMES[L][basis_type.__name__]["evaluate"] = toc - tic + + +pprint(TIMES) + + +with open(f"benchmark_{int(time())}.pkl", "wb") as fh: + pickle.dump(TIMES, fh) diff --git a/bbenchmark/benchmark_gpu0.pkl b/bbenchmark/benchmark_gpu0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e702dd442dced27acb26fcc3b3e395ce9f465585 GIT binary patch literal 307 zcmZo*nX19a00y;FG`tmnL=Tsno0C&wab~fR%M>s_wJb5GG_fQ#zGRBK{fB=m-#lPo z=;45g0>v(0*=`CnqZFvs#}!Fy28+7`_dcgM4hDt{R(A)1g!$SUKxL)g4nT7=m_P)J zyZseA`*#jt74}bVm#$9$s>oo2$TFJo@? J0igC$Jpf$(W#0e* literal 0 HcmV?d00001 diff --git a/bbenchmark/benchmark_host.pkl b/bbenchmark/benchmark_host.pkl new file mode 100644 index 0000000000000000000000000000000000000000..dc0dd2a1769fc52c9470986e74eb64864f59e7fe GIT binary patch literal 307 zcmZo*nX19a00y;FG`tmnL=Tsno0C&wab~fR%M>s_wJb5GG_fQ#zGRBK{l>F_wm|hg z957L!*k;SlN}yONP^*tClGY3scLzS3waO9<3>mEM4m>kTmTLf&m3lh>&COr}5iIWZ zmm4fH4}ewJUv3m%3ovVBHPKy1(9iTm;ktG~fPnyM$W- zvToDPIg_qJbek#on}>jO`!X;hX?O6pRZ8-OC=s5uZo?jA?UmgRJ_s6sonHA^-k?wb IsJ&DV0PQbeIsgCw literal 0 HcmV?d00001 diff --git a/bbenchmark/plot_bb.py b/bbenchmark/plot_bb.py new file mode 100644 index 0000000000..05f5350f4b --- /dev/null +++ b/bbenchmark/plot_bb.py @@ -0,0 +1,64 @@ +import os +import pickle +from pprint import pprint + +import matplotlib.pyplot as plt +import numpy as np + +host_fn = "benchmark_host.pkl" +gpu_fn = "benchmark_gpu0.pkl" + + +with open(host_fn, "rb") as fh: + host_times = pickle.load(fh) + +with open(gpu_fn, "rb") as fh: + gpu_times = pickle.load(fh) + +markers = {"FFBBasis2D": "8", "FLEBasis2D": "s"} + +# Evaluate_t +Ls = list(host_times.keys()) +for basis_type in markers.keys(): + plt.plot( + Ls, + [host_times[L][basis_type]["evaluate_t"] for L in Ls], + marker=markers[basis_type], + color="blue", + label=basis_type + "-host", + ) + plt.plot( + Ls, + [gpu_times[L][basis_type]["evaluate_t"] for L in Ls], + marker=markers[basis_type], + color="green", + label=basis_type + "-gpu", + ) +plt.title("Basis `evaluate_t` Permformance - Batch of 512 Images") +plt.xlabel("Image Pixel L (LxL)") +plt.ylabel("Time (seconds)") +plt.legend() +plt.savefig("evaluate_t.png") +plt.show() + +for basis_type in markers.keys(): + plt.plot( + Ls, + [host_times[L][basis_type]["evaluate"] for L in Ls], + marker=markers[basis_type], + color="blue", + label=basis_type + "-host", + ) + plt.plot( + Ls, + [gpu_times[L][basis_type]["evaluate"] for L in Ls], + marker=markers[basis_type], + color="green", + label=basis_type + "-gpu", + ) +plt.title("Basis `evaluate` Permformance - Batch of 512 Images") +plt.xlabel("Image Pixel L (LxL)") +plt.ylabel("Time (seconds)") +plt.legend() +plt.savefig("evaluate.png") +plt.show() From f4f41df23c8839ca3b3658f260e0325a4f35d437 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 4 Jun 2024 10:26:19 -0400 Subject: [PATCH 034/139] convert cufinufft towards cupy, keeping result on dvice --- pyproject.toml | 10 ++++----- src/aspire/nufft/cufinufft.py | 40 +++++++++++++++++------------------ 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3cd57981ef..fcc0f7cf4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,11 +61,11 @@ dependencies = [ "Source" = "https://github.com/ComputationalCryoEM/ASPIRE-Python" [project.optional-dependencies] -gpu-102 = ["pycuda", "cupy-cuda102", "cufinufft==1.3"] -gpu-110 = ["pycuda", "cupy-cuda110", "cufinufft==1.3"] -gpu-111 = ["pycuda", "cupy-cuda111", "cufinufft==1.3"] -gpu-11x = ["pycuda", "cupy-cuda11x", 
"cufinufft==1.3"] -gpu-12x = ["pycuda", "cupy-cuda12x", "cufinufft==2.2.0"] +gpu-102 = ["cupy-cuda102", "cufinufft==1.3"] +gpu-110 = ["cupy-cuda110", "cufinufft==1.3"] +gpu-111 = ["cupy-cuda111", "cufinufft==1.3"] +gpu-11x = ["cupy-cuda11x", "cufinufft==1.3"] +gpu-12x = ["cupy-cuda12x", "cufinufft==2.2.0"] dev = [ "black", "bumpversion", diff --git a/src/aspire/nufft/cufinufft.py b/src/aspire/nufft/cufinufft.py index 465c0b23f9..2dceb08b80 100644 --- a/src/aspire/nufft/cufinufft.py +++ b/src/aspire/nufft/cufinufft.py @@ -1,9 +1,7 @@ import logging +import cupy as cp import numpy as np -import pycuda.autoinit # noqa: F401 -import pycuda.driver as cuda # noqa: F401 -import pycuda.gpuarray as gpuarray # noqa: F401 from cufinufft import Plan as cufPlan from aspire.nufft import Plan @@ -85,7 +83,7 @@ def __init__(self, sz, fourier_pts, epsilon=1e-8, ntransforms=1, **kwargs): # Note, I store self.fourier_pts_gpu so the GPUArrray life # is tied to instance, instead of this method. - self.fourier_pts_gpu = gpuarray.to_gpu(self.fourier_pts) + self.fourier_pts_gpu = cp.array(self.fourier_pts) self._transform_plan.setpts(*self.fourier_pts_gpu) self._adjoint_plan.setpts(*self.fourier_pts_gpu) @@ -99,7 +97,7 @@ def transform(self, signal): For a batch, signal should have shape `(*sz, ntransforms)`. :returns: Transformed signal of shape `num_pts` or - `(ntransforms, num_pts)`. + `(ntransforms, num_pts)` as CuPy array. """ # Check we're not forcing a dtype workaround for ASPIRE-Python/703, @@ -113,6 +111,8 @@ def transform(self, signal): " In the future this will be an error." ) + signal = cp.asarray(signal, dtype=self.complex_dtype) + sig_shape = signal.shape res_shape = self.num_pts # Note, there is a corner case for ntransforms == 1. @@ -134,17 +134,16 @@ def transform(self, signal): sig_shape == self.sz ), f"Signal frame to be transformed must have shape {self.sz}" - signal_gpu = gpuarray.to_gpu( - np.ascontiguousarray(signal, dtype=self.complex_dtype) - ) + result = cp.empty(res_shape, dtype=self.complex_dtype) - result_gpu = gpuarray.GPUArray(res_shape, dtype=self.complex_dtype) + if signal.dtype != self.complex_dtype: + signal = signal.astype(self.complex_dtype) - self._transform_plan.execute(signal_gpu, out=result_gpu) + self._transform_plan.execute(signal, out=result) - result = result_gpu.get() # ASPIRE-Python/703 - result = result.astype(complex_type(self._original_dtype), copy=False) + if result.dtype != complex_type(self._original_dtype): + result = result.astype(complex_type(self._original_dtype)) return result @@ -156,7 +155,7 @@ def adjoint(self, signal): this should be a a 1D array of len `num_pts`. For a batch, signal should have shape `(ntransforms, num_pts)`. - :returns: Transformed signal `(sz)` or `(sz, ntransforms)`. + :returns: Transformed signal `(sz)` or `(sz, ntransforms)` as CuPy array. """ # Check we're not forcing a dtype workaround for ASPIRE-Python/703, @@ -170,6 +169,8 @@ def adjoint(self, signal): " In the future this will be an error." ) + signal = cp.asarray(signal, dtype=self.complex_dtype) + res_shape = self.sz # Note, there is a corner case for ntransforms == 1. if self.ntransforms > 1 or (self.ntransforms == 1 and len(signal.shape) == 2): @@ -181,16 +182,15 @@ def adjoint(self, signal): ), "For multiple transforms, signal stack length should match ntransforms {self.ntransforms}." 
res_shape = (self.ntransforms, *self.sz) - signal_gpu = gpuarray.to_gpu( - np.ascontiguousarray(signal, dtype=self.complex_dtype) - ) + result = cp.empty(res_shape, dtype=self.complex_dtype) - result_gpu = gpuarray.GPUArray(res_shape, dtype=self.complex_dtype) + if signal.dtype != self.complex_dtype: + signal = signal.astype(self.complex_dtype) - self._adjoint_plan.execute(signal_gpu, out=result_gpu) + self._adjoint_plan.execute(signal, out=result) - result = result_gpu.get() # ASPIRE-Python/703 - result = result.astype(complex_type(self._original_dtype), copy=False) + if result.dtype != complex_type(self._original_dtype): + result = result.astype(complex_type(self._original_dtype)) return result From a60a3e0976f73e78a5427515705eff17d6096ba5 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 4 Jun 2024 10:49:11 -0400 Subject: [PATCH 035/139] convert anufft and nufft towards detecting whether to keep array on gpu [skip ci] --- src/aspire/nufft/__init__.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index aa7c3a4adf..f748a10d84 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -2,6 +2,11 @@ import numpy as np +try: + import cupy as cp +except ModuleNotFoundError: + cp = None + from aspire import config from aspire.utils import LogFilterByCount, complex_type, real_type @@ -152,6 +157,9 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): Selects best available package from `nfft` `backends` configuration list. + When sig_f is provided as a CuPy gpu array with a cufinufft + backend, result is maintained on GPU. + :param sig_f: Array representing the signal(s) in Fourier space to be transformed. \ sig_f either matches length of fourier_pts or sig_f.shape is stack of (`ntransforms`, ...). :param fourier_pts: The points in Fourier space where the Fourier transform is to be calculated, @@ -162,6 +170,10 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): """ + on_gpu = False + if cp and isinstance(sig_f, cp.ndarray): + on_gpu = True + if fourier_pts.dtype != real_type(sig_f.dtype): raise RuntimeError( "anufft passed inconsistent dtypes." @@ -181,7 +193,13 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): sz=sz, fourier_pts=fourier_pts, ntransforms=ntransforms, epsilon=epsilon ) adjoint = plan.adjoint(sig_f) - return np.real(adjoint) if real else adjoint + + adjoint = adjoint.real if real else adjoint + + if not on_gpu: + adjoint = adjoint.get() + + return adjoint def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): @@ -191,6 +209,9 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): Selects best available package from `nfft` `backends` configuration list. + When sig_f is provided as a CuPy gpu array with a cufinufft + backend, result is maintained on GPU. + :param sig_f: Array representing the signal(s) in real space to be transformed. \ sig_f either matches `sz` or sig_f.shape is stack of (..., `ntransforms`). :param fourier_pts: The points in Fourier space where the Fourier transform is to be calculated, @@ -200,6 +221,10 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): """ + on_gpu = False + if cp and isinstance(sig_f, cp.ndarray): + on_gpu = True + if fourier_pts.dtype != real_type(sig_f.dtype): raise RuntimeError( "nufft passed inconsistent dtypes." 
@@ -229,4 +254,10 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): sz=sz, fourier_pts=fourier_pts, ntransforms=ntransforms, epsilon=epsilon ) transform = plan.transform(sig_f) - return np.real(transform) if real else transform + + transform = transform.real if real else transform + + if not on_gpu: + transform = transform.get() + + return transform From 1a14068cbc6c9c1d8ef2cfaa355960508ccaf15b Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 10:17:06 -0400 Subject: [PATCH 036/139] whitespace --- bbenchmark/bbenchmark.py | 1 + 1 file changed, 1 insertion(+) diff --git a/bbenchmark/bbenchmark.py b/bbenchmark/bbenchmark.py index 6c6320c101..01aac6e7eb 100644 --- a/bbenchmark/bbenchmark.py +++ b/bbenchmark/bbenchmark.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np + from aspire.basis import FFBBasis2D, FLEBasis2D from aspire.downloader import emdb_2660 from aspire.noise import WhiteNoiseAdder From 092cda07dedaec106f4fec98c33bbc09b87709b7 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 10:16:54 -0400 Subject: [PATCH 037/139] add sparse cupy gpu wrapper and tests for methods in use --- src/aspire/numeric/__init__.py | 19 ++++++++++++ tests/test_numeric_sparse.py | 54 ++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 tests/test_numeric_sparse.py diff --git a/src/aspire/numeric/__init__.py b/src/aspire/numeric/__init__.py index d298f131e4..95283e5d87 100644 --- a/src/aspire/numeric/__init__.py +++ b/src/aspire/numeric/__init__.py @@ -35,3 +35,22 @@ def fft_object(which): fft = fft_object(config["common"]["fft"].as_str()) + + +# Configure `sparse` in tandem with `numeric` as the arrays generally will need to interoperate. +def sparse_object(which): + if which == "cupy": + from cupyx.scipy import sparse as SparseClass + + # CuPy imports don't work the same as scipy + from cupyx.scipy.sparse.linalg import eigsh + + SparseClass.linalg.eigsh = eigsh + elif which == "numpy": + from scipy import sparse as SparseClass + else: + raise RuntimeError(f"Invalid selection for sparse module: {which}") + return SparseClass + + +sparse = sparse_object(config["common"]["numeric"].as_str()) diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py new file mode 100644 index 0000000000..3964419e21 --- /dev/null +++ b/tests/test_numeric_sparse.py @@ -0,0 +1,54 @@ +import numpy as np +import pytest + +from aspire.numeric import numeric_object, sparse_object + +# If cupy is not available, skip this entire test module +pytest.importorskip("cupy") + +NUMERICS = ["numpy", "cupy"] + + +@pytest.fixture(params=NUMERICS, ids=lambda x: f"{x}", scope="module") +def backends(request): + xp = numeric_object(request.param) + sparse = sparse_object(request.param) + return xp, sparse + + +def test_csr_matrix(backends): + """ + Create csr_matrix and multiply with an `xp` array. + """ + xp, sparse = backends + + m, n = 10, 10 + jdx = xp.arange(10) + idx = xp.arange(10) + vals = xp.random.random(10) + + # Compute dense matmul + _A = np.diag(xp.asnumpy(vals)) + _B = np.random.random((10, 20)) + _C = _A @ _B + + # Compute matmul using sparse csr + A = sparse.csr_matrix((vals, (jdx, idx)), shape=(m, n), dtype=np.float64) + B = xp.array(_B) + C = A @ B + + # Compare + np.testing.assert_allclose(_C, xp.asnumpy(C)) + + +def test_eigsh(backends): + """ + Invoke sparse eigsh call with `xp` arrays. 
+ """ + xp, sparse = backends + + A = xp.eye(123) + + lamb, _ = sparse.linalg.eigsh(A) + np.testing.assert_allclose(xp.asnumpy(lamb), 1.0) + print(lamb) From 415c9410ab6574a22888fb016c09a5853b04314b Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 15:11:20 -0400 Subject: [PATCH 038/139] fixup mn --- tests/test_numeric_sparse.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py index 3964419e21..45c28bef31 100644 --- a/tests/test_numeric_sparse.py +++ b/tests/test_numeric_sparse.py @@ -23,13 +23,13 @@ def test_csr_matrix(backends): xp, sparse = backends m, n = 10, 10 - jdx = xp.arange(10) - idx = xp.arange(10) + jdx = xp.arange(m) + idx = xp.arange(n) vals = xp.random.random(10) # Compute dense matmul _A = np.diag(xp.asnumpy(vals)) - _B = np.random.random((10, 20)) + _B = np.random.random((n, 20)) _C = _A @ _B # Compute matmul using sparse csr From 62d46204896dce87955638a8998fd3149925516f Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 10:46:35 -0400 Subject: [PATCH 039/139] first pass migrating FLE to cupy via xp --- src/aspire/basis/fle_2d.py | 37 ++++++++++++++++++-------------- src/aspire/basis/fle_2d_utils.py | 16 ++++++++------ src/aspire/config_default.yaml | 2 +- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 423d37c093..ce6f04d3a2 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -1,7 +1,6 @@ import logging import numpy as np -import scipy.sparse as sparse from scipy.fft import dct, idct from scipy.special import jv @@ -13,7 +12,7 @@ transform_complex_to_real, ) from aspire.nufft import anufft, nufft -from aspire.numeric import fft +from aspire.numeric import fft, sparse, xp from aspire.operators import DiagMatrix from aspire.utils import complex_type, grid_2d @@ -440,7 +439,7 @@ def _create_basis_functions(self): """ Generate the actual basis functions as Python lambda operators """ - norm_constants = np.zeros(self.count) + norm_constants = xp.zeros(self.count) basis_functions = [None] * self.count for i in range(self.count): # parameters defining the basis function: bessel order and which bessel root @@ -537,13 +536,14 @@ def _step2_t(self, z): num_img = z.shape[0] # Compute FFT along angular nodes betas = fft.fft(z, axis=2) / self.num_angular_nodes + betas = xp.asarray(betas) # RM betas = betas[:, :, self.nus] - betas = np.conj(betas) - betas = np.swapaxes(betas, 0, 2) + betas = betas.conj() + betas = betas.swapaxes(0, 2) betas = betas.reshape(-1, self.num_radial_nodes * num_img) betas = self.c2r_nus @ betas betas = betas.reshape(-1, self.num_radial_nodes, num_img) - betas = np.real(np.swapaxes(betas, 0, 2)) + betas = betas.swapaxes(0, 2).real return betas def _step3_t(self, betas): @@ -554,18 +554,20 @@ def _step3_t(self, betas): """ num_img = betas.shape[0] if self.num_interp > self.num_radial_nodes: + betas = xp.asnumpy(betas) betas = dct(betas, axis=1, type=2) / (2 * self.num_radial_nodes) zeros = np.zeros(betas.shape) betas = np.concatenate((betas, zeros), axis=1) betas = idct(betas, axis=1, type=2) * 2 * betas.shape[1] - betas = np.moveaxis(betas, 0, -1) + betas = xp.asarray(betas) + betas = xp.moveaxis(betas, 0, -1) - coefs = np.zeros((self.count, num_img), dtype=np.float64) + coefs = xp.zeros((self.count, num_img), dtype=np.float64) for i in range(self.ell_p_max + 1): coefs[self.idx_list[i]] = self.A3[i] @ betas[:, i, :] coefs = coefs.T - return 
coefs * self.norm_constants / self.h + return xp.asnumpy(coefs * self.norm_constants / self.h) def _step3(self, coefs): """ @@ -574,19 +576,20 @@ def _step3(self, coefs): Uses barycenteric interpolation in reverse to compute values of Betas at Chebyshev nodes, given an array of FLE coefficients. """ - coefs = coefs.copy().reshape(-1, self.count) + coefs = xp.asarray(coefs.reshape(-1, self.count)) num_img = coefs.shape[0] coefs *= self.h * self.norm_constants coefs = coefs.T - out = np.zeros( + out = xp.zeros( (self.num_interp, 2 * self.max_ell + 1, num_img), dtype=np.float64, ) for i in range(self.ell_p_max + 1): out[:, i, :] = self.A3_T[i] @ coefs[self.idx_list[i]] - out = np.moveaxis(out, -1, 0) + out = xp.moveaxis(out, -1, 0) if self.num_interp > self.num_radial_nodes: + out = xp.asnumpy(out) # RM out = dct(out, axis=1, type=2) out = out[:, : self.num_radial_nodes, :] out = idct(out, axis=1, type=2) @@ -599,19 +602,21 @@ def _step2(self, betas): to images). Uses the IFFT to convert Beta values into Fourier-space images. """ + betas = xp.asarray(betas) num_img = betas.shape[0] - tmp = np.zeros( + tmp = xp.zeros( (num_img, self.num_radial_nodes, self.num_angular_nodes), dtype=np.complex128, ) - betas = np.swapaxes(betas, 0, 2) + betas = betas.swapaxes(0, 2) betas = betas.reshape(-1, self.num_radial_nodes * num_img) betas = self.r2c_nus @ betas betas = betas.reshape(-1, self.num_radial_nodes, num_img) - betas = np.swapaxes(betas, 0, 2) + betas = betas.swapaxes(0, 2) - tmp[:, :, self.nus] = np.conj(betas) + tmp[:, :, self.nus] = betas.conj() + tmp = xp.asnumpy(tmp) # rm z = fft.ifft(tmp, axis=2) return z diff --git a/src/aspire/basis/fle_2d_utils.py b/src/aspire/basis/fle_2d_utils.py index cde0cd11bf..23d1441a68 100644 --- a/src/aspire/basis/fle_2d_utils.py +++ b/src/aspire/basis/fle_2d_utils.py @@ -1,5 +1,6 @@ import numpy as np -import scipy.sparse as sparse + +from aspire.numeric import sparse, xp def transform_complex_to_real(B, ells): @@ -43,9 +44,9 @@ def precomp_transform_complex_to_real(ells): """ count = len(ells) num_nonzero = np.sum(ells == 0) + 2 * np.sum(ells != 0) - idx = np.zeros(num_nonzero, dtype=int) - jdx = np.zeros(num_nonzero, dtype=int) - vals = np.zeros(num_nonzero, dtype=np.complex128) + idx = xp.zeros(num_nonzero, dtype=int) + jdx = xp.zeros(num_nonzero, dtype=int) + vals = xp.zeros(num_nonzero, dtype=np.complex128) k = 0 for i in range(count): @@ -190,9 +191,10 @@ def barycentric_interp_sparse(target_points, known_points, numsparse): # note that const cancels in numerator and denominator vals = vals / denom.reshape(-1, 1) - vals = vals.flatten() - idx = idx.flatten() - jdx = jdx.flatten() + # TODO, migrate more of this method towards `xp` + vals = xp.array(vals.flatten()) + idx = xp.array(idx.flatten()) + jdx = xp.array(jdx.flatten()) # A is the linear operator mapping the function values from the fixed source # points to the fixed target points. # A(i,j) = \ell(x[i] ) w_j/(x[i] - xs[j]), with the notation in Eq. 
3.3 diff --git a/src/aspire/config_default.yaml b/src/aspire/config_default.yaml index def78983c0..26176a97ac 100644 --- a/src/aspire/config_default.yaml +++ b/src/aspire/config_default.yaml @@ -1,7 +1,7 @@ version: 0.12.3 common: # numeric module to use - one of numpy/cupy - numeric: numpy + numeric: cupy # fft backend to use - one of pyfftw/scipy/cupy/mkl fft: scipy From 51f60dbc201b58d36b7bfdf46ca21b24ced3c205 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 11:02:32 -0400 Subject: [PATCH 040/139] add dct/idct to pyfftw, scipy, cupy wrappers --- src/aspire/basis/fle_2d.py | 13 ++++++------- src/aspire/numeric/cupy_fft.py | 6 ++++++ src/aspire/numeric/pyfftw_fft.py | 6 ++++++ src/aspire/numeric/scipy_fft.py | 6 ++++++ 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index ce6f04d3a2..5675becf86 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -1,7 +1,6 @@ import logging import numpy as np -from scipy.fft import dct, idct from scipy.special import jv from aspire.basis import Coef, FBBasisMixin, SteerableBasis2D @@ -555,10 +554,10 @@ def _step3_t(self, betas): num_img = betas.shape[0] if self.num_interp > self.num_radial_nodes: betas = xp.asnumpy(betas) - betas = dct(betas, axis=1, type=2) / (2 * self.num_radial_nodes) + betas = fft.dct(betas, axis=1, type=2) / (2 * self.num_radial_nodes) zeros = np.zeros(betas.shape) betas = np.concatenate((betas, zeros), axis=1) - betas = idct(betas, axis=1, type=2) * 2 * betas.shape[1] + betas = fft.idct(betas, axis=1, type=2) * 2 * betas.shape[1] betas = xp.asarray(betas) betas = xp.moveaxis(betas, 0, -1) @@ -590,9 +589,9 @@ def _step3(self, coefs): out = xp.moveaxis(out, -1, 0) if self.num_interp > self.num_radial_nodes: out = xp.asnumpy(out) # RM - out = dct(out, axis=1, type=2) + out = fft.dct(out, axis=1, type=2) out = out[:, : self.num_radial_nodes, :] - out = idct(out, axis=1, type=2) + out = fft.idct(out, axis=1, type=2) return out @@ -736,10 +735,10 @@ def _radial_convolve_weights(self, b): b = np.squeeze(b) b = np.array(b) if self.num_interp > self.num_radial_nodes: - b = dct(b, axis=0, type=2) / (2 * self.num_radial_nodes) + b = fft.dct(b, axis=0, type=2) / (2 * self.num_radial_nodes) bz = np.zeros(b.shape) b = np.concatenate((b, bz), axis=0) - b = idct(b, axis=0, type=2) * 2 * b.shape[0] + b = fft.idct(b, axis=0, type=2) * 2 * b.shape[0] a = np.zeros(self.count, dtype=np.float64) y = [None] * (self.ell_p_max + 1) for i in range(self.ell_p_max + 1): diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 4f45f92117..3327c78f7d 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -33,3 +33,9 @@ def fftshift(self, x, axes=None): def ifftshift(self, x, axes=None): return cp.fft.ifftshift(x, axes=axes) + + def dct(self, *args, **kwargs): + return cp.fft.dct(*args, **kwargs) + + def idct(self, *args, **kwargs): + return cp.fft.idct(*args, **kwargs) diff --git a/src/aspire/numeric/pyfftw_fft.py b/src/aspire/numeric/pyfftw_fft.py index 9cfdd45210..afcad98d28 100644 --- a/src/aspire/numeric/pyfftw_fft.py +++ b/src/aspire/numeric/pyfftw_fft.py @@ -159,3 +159,9 @@ def fftshift(self, a, axes=None): def ifftshift(self, a, axes=None): return scipy_fft.ifftshift(a, axes=axes) + + def dct(self, *args, **kwargs): + return scipy_fft.dct(*args, **kwargs) + + def idct(self, *args, **kwargs): + return scipy_fft.idct(*args, **kwargs) diff --git a/src/aspire/numeric/scipy_fft.py 
b/src/aspire/numeric/scipy_fft.py index c5a392f96b..d78e463803 100644 --- a/src/aspire/numeric/scipy_fft.py +++ b/src/aspire/numeric/scipy_fft.py @@ -33,3 +33,9 @@ def fftshift(self, x, axes=None): def ifftshift(self, x, axes=None): return sp.fft.ifftshift(x, axes=axes) + + def dct(self, *args, **kwargs): + return sp.fft.dct(*args, **kwargs) + + def idct(self, *args, **kwargs): + return sp.fft.idct(*args, **kwargs) From 61e7db1a0a3b9218fa69e094ecf34a5dad57c488 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 11:52:13 -0400 Subject: [PATCH 041/139] phase 2, fle internals --- src/aspire/basis/fle_2d.py | 15 ++++----------- src/aspire/config_default.yaml | 2 +- src/aspire/image/image.py | 7 +++++-- src/aspire/numeric/__init__.py | 15 +++++++++++++++ src/aspire/numeric/cupy_fft.py | 5 +++-- src/aspire/numeric/numpy.py | 7 ++++++- 6 files changed, 34 insertions(+), 17 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 5675becf86..6215baaca8 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -496,7 +496,7 @@ def _evaluate_t(self, imgs): coefficients. """ # See Section 3.5 - imgs = imgs.copy() + imgs = xp.array(imgs) # Copy here, mutating. imgs[:, self.radial_mask] = 0 z = self._step1_t(imgs) b = self._step2_t(z) @@ -513,7 +513,7 @@ def _step1_t(self, im): """ im = im.reshape(-1, self.nres, self.nres).astype(complex_type(self.dtype)) num_img = im.shape[0] - z = np.zeros( + z = xp.zeros( (num_img, self.num_radial_nodes, self.num_angular_nodes), dtype=complex_type(self.dtype), ) @@ -535,7 +535,6 @@ def _step2_t(self, z): num_img = z.shape[0] # Compute FFT along angular nodes betas = fft.fft(z, axis=2) / self.num_angular_nodes - betas = xp.asarray(betas) # RM betas = betas[:, :, self.nus] betas = betas.conj() betas = betas.swapaxes(0, 2) @@ -553,12 +552,9 @@ def _step3_t(self, betas): """ num_img = betas.shape[0] if self.num_interp > self.num_radial_nodes: - betas = xp.asnumpy(betas) betas = fft.dct(betas, axis=1, type=2) / (2 * self.num_radial_nodes) - zeros = np.zeros(betas.shape) - betas = np.concatenate((betas, zeros), axis=1) + betas = xp.concatenate((betas, xp.zeros(betas.shape)), axis=1) betas = fft.idct(betas, axis=1, type=2) * 2 * betas.shape[1] - betas = xp.asarray(betas) betas = xp.moveaxis(betas, 0, -1) coefs = xp.zeros((self.count, num_img), dtype=np.float64) @@ -588,7 +584,6 @@ def _step3(self, coefs): out[:, i, :] = self.A3_T[i] @ coefs[self.idx_list[i]] out = xp.moveaxis(out, -1, 0) if self.num_interp > self.num_radial_nodes: - out = xp.asnumpy(out) # RM out = fft.dct(out, axis=1, type=2) out = out[:, : self.num_radial_nodes, :] out = fft.idct(out, axis=1, type=2) @@ -601,7 +596,6 @@ def _step2(self, betas): to images). Uses the IFFT to convert Beta values into Fourier-space images. 
""" - betas = xp.asarray(betas) num_img = betas.shape[0] tmp = xp.zeros( (num_img, self.num_radial_nodes, self.num_angular_nodes), @@ -615,7 +609,6 @@ def _step2(self, betas): betas = betas.swapaxes(0, 2) tmp[:, :, self.nus] = betas.conj() - tmp = xp.asnumpy(tmp) # rm z = fft.ifft(tmp, axis=2) return z @@ -639,7 +632,7 @@ def _step1(self, z): im = im.reshape(num_img, self.nres, self.nres) im[:, self.radial_mask] = 0 - return im + return xp.asnumpy(im) def _create_dense_matrix(self): """ diff --git a/src/aspire/config_default.yaml b/src/aspire/config_default.yaml index 26176a97ac..fed4cea50a 100644 --- a/src/aspire/config_default.yaml +++ b/src/aspire/config_default.yaml @@ -3,7 +3,7 @@ common: # numeric module to use - one of numpy/cupy numeric: cupy # fft backend to use - one of pyfftw/scipy/cupy/mkl - fft: scipy + fft: cupy # Set cache directory for ASPIRE example data. # By default the cache location will be set by pooch.os_cache(), diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 20d998afe6..40d6087f13 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -393,11 +393,14 @@ def downsample(self, ds_res): im = self.stack_reshape(-1) # compute FT with centered 0-frequency - fx = fft.centered_fft2(im._data) + fx = xp.asnumpy(fft.centered_fft2(xp.asarray(im._data))) # crop 2D Fourier transform for each image crop_fx = np.array([crop_pad_2d(fx[i], ds_res) for i in range(self.n_images)]) # take back to real space, discard complex part, and scale - out = np.real(fft.centered_ifft2(crop_fx)) * (ds_res**2 / self.resolution**2) + out = fft.centered_ifft2(xp.asarray(crop_fx)).real * ( + ds_res**2 / self.resolution**2 + ) + out = xp.asnumpy(out) return self.__class__(out).stack_reshape(original_stack_shape) diff --git a/src/aspire/numeric/__init__.py b/src/aspire/numeric/__init__.py index 95283e5d87..be88775498 100644 --- a/src/aspire/numeric/__init__.py +++ b/src/aspire/numeric/__init__.py @@ -36,6 +36,21 @@ def fft_object(which): fft = fft_object(config["common"]["fft"].as_str()) +# Sanity check. +if (config["common"]["numeric"].as_str() == "cupy") and ( + config["common"]["fft"].as_str() != "cupy" +): + raise RuntimeError( + "Using `cupy` numeric backend without `cupy` fft is unsupported." + ) + +if (config["common"]["fft"].as_str() == "cupy") and ( + config["common"]["numeric"].as_str() != "cupy" +): + raise RuntimeError( + "Using `cupy` fft without `cupy` numeric backend is unsupported." + ) + # Configure `sparse` in tandem with `numeric` as the arrays generally will need to interoperate. 
def sparse_object(which): diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 3327c78f7d..29939e504c 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -1,4 +1,5 @@ import cupy as cp +import cupyx.scipy.fft as cufft from aspire.numeric.base_fft import FFT @@ -35,7 +36,7 @@ def ifftshift(self, x, axes=None): return cp.fft.ifftshift(x, axes=axes) def dct(self, *args, **kwargs): - return cp.fft.dct(*args, **kwargs) + return cufft.dct(*args, **kwargs) def idct(self, *args, **kwargs): - return cp.fft.idct(*args, **kwargs) + return cufft.idct(*args, **kwargs) diff --git a/src/aspire/numeric/numpy.py b/src/aspire/numeric/numpy.py index 3237c2c3ad..9367409c78 100644 --- a/src/aspire/numeric/numpy.py +++ b/src/aspire/numeric/numpy.py @@ -1,8 +1,13 @@ +import cupy as cp import numpy as np class Numpy: - asnumpy = staticmethod(lambda x: x) + @staticmethod + def asnumpy(x): + if isinstance(x, cp.ndarray): + x = x.get() + return x def __getattr__(self, item): """ From a84fb5a75d90a292c1d71d6728926a45447d84ae Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 12:20:05 -0400 Subject: [PATCH 042/139] mem cleanup workaround --- src/aspire/basis/fle_2d.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 6215baaca8..4278331e1a 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -1,3 +1,4 @@ +import gc import logging import numpy as np @@ -18,6 +19,19 @@ logger = logging.getLogger(__name__) +def _cleanup(): + """ + Utility for informing python+cupy to cleanup memory held by old vars. + """ + gc.collect() + try: + import cupy + + cupy.get_default_memory_pool().free_all_blocks() + except ModuleNotFoundError: + pass + + class FLEBasis2D(SteerableBasis2D, FBBasisMixin): """ Define a derived class for Fast Fourier Bessel 2D expansion using interpolation @@ -499,12 +513,20 @@ def _evaluate_t(self, imgs): imgs = xp.array(imgs) # Copy here, mutating. imgs[:, self.radial_mask] = 0 z = self._step1_t(imgs) + del imgs + _cleanup() + b = self._step2_t(z) + del z + _cleanup() + coefs = self._step3_t(b) + del b + _cleanup() # return in FB order coefs = coefs[..., self._fle_to_fb_indices] - return coefs.astype(self.coefficient_dtype, copy=False) + return xp.asnumpy(coefs.astype(self.coefficient_dtype)) def _step1_t(self, im): """ @@ -562,7 +584,7 @@ def _step3_t(self, betas): coefs[self.idx_list[i]] = self.A3[i] @ betas[:, i, :] coefs = coefs.T - return xp.asnumpy(coefs * self.norm_constants / self.h) + return coefs * self.norm_constants / self.h def _step3(self, coefs): """ From 584330935be33f75c451972d78bcf5722e226232 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 14:54:57 -0400 Subject: [PATCH 043/139] cupy eigvals needs large problem or nans... 
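
cupyx's `eigsh` was observed to return nans (or badly inaccurate
eigenvalues) when handed a very small problem, so the identity matrix
in `test_eigsh` is grown from 123 to 1234.  A minimal reproduction
sketch, assuming a CUDA enabled CuPy install (1234 is simply
comfortably large, not an exact threshold):

    import cupy as cp
    from cupyx.scipy.sparse.linalg import eigsh

    # Default k=6 eigenvalues of a large identity all come back ~1.0.
    lamb, _ = eigsh(cp.eye(1234))
    cp.testing.assert_allclose(lamb, 1.0)

    # A much smaller identity (e.g. cp.eye(123)) was observed to yield
    # nans on the tested configuration.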
--- tests/test_numeric_sparse.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py index 45c28bef31..120a7de49f 100644 --- a/tests/test_numeric_sparse.py +++ b/tests/test_numeric_sparse.py @@ -47,7 +47,7 @@ def test_eigsh(backends): """ xp, sparse = backends - A = xp.eye(123) + A = xp.eye(1234) lamb, _ = sparse.linalg.eigsh(A) np.testing.assert_allclose(xp.asnumpy(lamb), 1.0) From a780c51a4c40dd1c36d39005afbe4e3deb7be615 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 14:55:34 -0400 Subject: [PATCH 044/139] crop pad updates --- src/aspire/image/image.py | 6 +++--- src/aspire/utils/coor_trans.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 40d6087f13..5ea20e2343 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -393,11 +393,11 @@ def downsample(self, ds_res): im = self.stack_reshape(-1) # compute FT with centered 0-frequency - fx = xp.asnumpy(fft.centered_fft2(xp.asarray(im._data))) + fx = fft.centered_fft2(xp.asarray(im._data)) # crop 2D Fourier transform for each image - crop_fx = np.array([crop_pad_2d(fx[i], ds_res) for i in range(self.n_images)]) + crop_fx = crop_pad_2d(fx, ds_res) # take back to real space, discard complex part, and scale - out = fft.centered_ifft2(xp.asarray(crop_fx)).real * ( + out = fft.centered_ifft2(crop_fx).real * ( ds_res**2 / self.resolution**2 ) out = xp.asnumpy(out) diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index e909e2f394..844f218551 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -8,6 +8,7 @@ from numpy.linalg import norm from scipy.linalg import svd +from aspire.numeric import xp from aspire.utils.random import Random from aspire.utils.rotation import Rotation @@ -368,23 +369,26 @@ def rots_to_clmatrix(rots, n_theta): def crop_pad_2d(im, size, fill_value=0): """ - :param im: A 2-dimensional numpy array + :param im: A >=2-dimensional numpy array :param size: Integer size of cropped/padded output - :return: A numpy array of shape (size, size) + :return: A numpy array of shape (..., size, size) """ - im_y, im_x = im.shape + im_y, im_x = im.shape[-2:] # shift terms start_x = math.floor(im_x / 2) - math.floor(size / 2) start_y = math.floor(im_y / 2) - math.floor(size / 2) # cropping if size <= min(im_y, im_x): - return im[start_y : start_y + size, start_x : start_x + size] + return im[..., start_y : start_y + size, start_x : start_x + size] # padding elif size >= max(im_y, im_x): + # Determine shape + shape = list(im.shape[:-2]) + shape.extend([size,size]) # ensure that we return in the same dtype as the input - to_return = fill_value * np.ones((size, size), dtype=im.dtype) + to_return = xp.full(shape, fill_value, dtype=im.dtype) # when padding, start_x and start_y are negative since size is larger # than im_x and im_y; the below line calculates where the original image # is placed in relation to the (now-larger) box size From f4c8bf78db12bf844f3b9109bd22f2832c8bb199 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 5 Jun 2024 14:56:56 -0400 Subject: [PATCH 045/139] tox cleanup [skip ci] --- src/aspire/image/image.py | 4 +--- src/aspire/utils/coor_trans.py | 2 +- tests/test_numeric_sparse.py | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 5ea20e2343..cbe0bbf07d 100644 --- 
a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -397,9 +397,7 @@ def downsample(self, ds_res): # crop 2D Fourier transform for each image crop_fx = crop_pad_2d(fx, ds_res) # take back to real space, discard complex part, and scale - out = fft.centered_ifft2(crop_fx).real * ( - ds_res**2 / self.resolution**2 - ) + out = fft.centered_ifft2(crop_fx).real * (ds_res**2 / self.resolution**2) out = xp.asnumpy(out) return self.__class__(out).stack_reshape(original_stack_shape) diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index 844f218551..dfb1c630f1 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -386,7 +386,7 @@ def crop_pad_2d(im, size, fill_value=0): elif size >= max(im_y, im_x): # Determine shape shape = list(im.shape[:-2]) - shape.extend([size,size]) + shape.extend([size, size]) # ensure that we return in the same dtype as the input to_return = xp.full(shape, fill_value, dtype=im.dtype) # when padding, start_x and start_y are negative since size is larger diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py index 120a7de49f..288e41a176 100644 --- a/tests/test_numeric_sparse.py +++ b/tests/test_numeric_sparse.py @@ -51,4 +51,3 @@ def test_eigsh(backends): lamb, _ = sparse.linalg.eigsh(A) np.testing.assert_allclose(xp.asnumpy(lamb), 1.0) - print(lamb) From 440175c9eb67d273826a9b38ec3e68b16b8cdb1b Mon Sep 17 00:00:00 2001 From: "Joshua C. Carmichael" Date: Tue, 4 Jun 2024 16:06:28 -0400 Subject: [PATCH 046/139] evaluate_t on gpu. --- src/aspire/basis/ffb_2d.py | 37 +++++++++++++++++-------------------- src/aspire/image/image.py | 2 +- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 5a5c7c3f27..e9e6ab90a8 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -193,56 +193,53 @@ def _evaluate_t(self, x): n_images = x.shape[0] # resamping x in a polar Fourier gird using nonuniform discrete Fourier transform - pf = nufft(x, 2 * pi * freqs) - pf = np.reshape(pf, (n_images, n_r, n_theta)) + pf = nufft(xp.array(x), 2 * pi * freqs) + pf = pf.reshape(n_images, n_r, n_theta) # Recover "negative" frequencies from "positive" half plane. - pf = np.concatenate((pf, pf.conjugate()), axis=2) + pf = xp.concatenate((pf, pf.conjugate()), axis=2) # evaluate radial integral using the Gauss-Legendre quadrature rule - for i_r in range(0, n_r): - pf[:, i_r, :] = pf[:, i_r, :] * ( - self._precomp["gl_weights"][i_r] * self._precomp["gl_nodes"][i_r] - ) + pf = pf * (xp.array(self._precomp["gl_weights"]) * xp.array(self._precomp["gl_nodes"]))[None, :, None] # 1D FFT on the angular dimension for each concentric circle - pf = 2 * pi / (2 * n_theta) * xp.asnumpy(fft.fft(xp.asarray(pf))) + pf = 2 * xp.pi / (2 * n_theta) * fft.fft(pf) # This only makes it easier to slice the array later. 
- v = np.zeros((n_images, self.count), dtype=x.dtype) + v = xp.zeros((n_images, self.count), dtype=x.dtype) # go through each basis function and find the corresponding coefficient ind = 0 - idx = ind + np.arange(self.k_max[0]) + idx = ind + xp.arange(self.k_max[0]) # include the normalization factor of angular part into radial part - radial_norm = self._precomp["radial"] / np.expand_dims(self.angular_norms, 1) + radial_norm = xp.array(self._precomp["radial"] / np.expand_dims(self.angular_norms, 1)) v[:, self._zero_angular_inds] = pf[:, :, 0].real @ radial_norm[idx].T - ind = ind + np.size(idx) + ind = ind + idx.size ind_pos = ind for ell in range(1, self.ell_max + 1): - idx = ind + np.arange(self.k_max[ell]) - idx_pos = ind_pos + np.arange(self.k_max[ell]) + idx = ind + xp.arange(self.k_max[ell]) + idx_pos = ind_pos + xp.arange(self.k_max[ell]) idx_neg = idx_pos + self.k_max[ell] v_ell = pf[:, :, ell] @ radial_norm[idx].T if np.mod(ell, 2) == 0: - v_pos = np.real(v_ell) - v_neg = -np.imag(v_ell) + v_pos = v_ell.real + v_neg = -v_ell.imag else: - v_pos = np.imag(v_ell) - v_neg = np.real(v_ell) + v_pos = v_ell.imag + v_neg = v_ell.real v[:, idx_pos] = v_pos v[:, idx_neg] = v_neg - ind = ind + np.size(idx) + ind = ind + idx.size ind_pos = ind_pos + 2 * self.k_max[ell] - return v + return xp.asnumpy(v) def filter_to_basis_mat(self, f, **kwargs): """ diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index cbe0bbf07d..d1f9dc8c04 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -400,7 +400,7 @@ def downsample(self, ds_res): out = fft.centered_ifft2(crop_fx).real * (ds_res**2 / self.resolution**2) out = xp.asnumpy(out) - return self.__class__(out).stack_reshape(original_stack_shape) + return self.__class__(np.array(out.get())).stack_reshape(original_stack_shape) def filter(self, filter): """ From a64b8728f159463ff3a0620513a5b3a2651ef435 Mon Sep 17 00:00:00 2001 From: "Joshua C. Carmichael" Date: Wed, 5 Jun 2024 16:06:21 -0400 Subject: [PATCH 047/139] Optimize ffb2d for gpu. --- src/aspire/basis/ffb_2d.py | 56 ++++++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index e9e6ab90a8..996e44f95a 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -105,6 +105,7 @@ def _evaluate(self, v): coordinate basis. This is Image instance with resolution of `self.sz` and the first dimension correspond to remaining dimension of `v`. 
""" + v = xp.array(v) sz_roll = v.shape[:-1] v = v.reshape(-1, self.count) @@ -112,27 +113,29 @@ def _evaluate(self, v): n_data = v.shape[0] # get information on polar grids from precomputed data - n_theta = np.size(self._precomp["freqs"], 2) - n_r = np.size(self._precomp["freqs"], 1) + n_theta = self._precomp["freqs"].shape[2] + n_r = self._precomp["freqs"].shape[1] # go through each basis function and find corresponding coefficient - pf = np.zeros((n_data, 2 * n_theta, n_r), dtype=complex_type(self.dtype)) + pf = xp.zeros((n_data, 2 * n_theta, n_r), dtype=complex_type(self.dtype)) ind = 0 - idx = ind + np.arange(self.k_max[0], dtype=int) + idx = ind + xp.arange(self.k_max[0], dtype=int) # include the normalization factor of angular part into radial part - radial_norm = self._precomp["radial"] / np.expand_dims(self.angular_norms, 1) - pf[:, 0, :] = v[:, self._zero_angular_inds] @ radial_norm[idx] - ind = ind + np.size(idx) + radial_norm = xp.array(self._precomp["radial"]) / xp.array( + np.expand_dims(self.angular_norms, 1) + ) + pf[:, 0, :] = v[:, xp.array(self._zero_angular_inds)] @ radial_norm[idx] + ind = ind + idx.size ind_pos = ind for ell in range(1, self.ell_max + 1): - idx = ind + np.arange(self.k_max[ell], dtype=int) - idx_pos = ind_pos + np.arange(self.k_max[ell], dtype=int) - idx_neg = idx_pos + self.k_max[ell] + idx = ind + xp.arange(self.k_max[ell], dtype=int) + idx_pos = ind_pos + xp.arange(self.k_max[ell], dtype=int) + idx_neg = idx_pos + xp.array(self.k_max[ell]) v_ell = (v[:, idx_pos] - 1j * v[:, idx_neg]) / 2.0 @@ -147,22 +150,19 @@ def _evaluate(self, v): else: pf[:, 2 * n_theta - ell, :] = -pf_ell.conjugate() - ind = ind + np.size(idx) - ind_pos = ind_pos + 2 * self.k_max[ell] + ind = ind + idx.size + ind_pos = ind_pos + 2 * xp.array(self.k_max[ell]) # 1D inverse FFT in the degree of polar angle - pf = 2 * pi * xp.asnumpy(fft.ifft(xp.asarray(pf), axis=1)) + pf = 2 * xp.pi * fft.ifft(xp.asarray(pf), axis=1) # Only need "positive" frequencies. 
- hsize = int(np.size(pf, 1) / 2) + hsize = int(pf.shape[1] / 2) pf = pf[:, 0:hsize, :] - - for i_r in range(0, n_r): - pf[..., i_r] = pf[..., i_r] * ( - self._precomp["gl_weights"][i_r] * self._precomp["gl_nodes"][i_r] - ) - - pf = np.reshape(pf, (n_data, n_r * n_theta)) + pf *= ( + xp.array(self._precomp["gl_weights"]) * xp.array(self._precomp["gl_nodes"]) + )[None, None, :] + pf = pf.reshape(n_data, n_r * n_theta) # perform inverse non-uniformly FFT transform back to 2D coordinate basis freqs = m_reshape(self._precomp["freqs"], (2, n_r * n_theta)) @@ -172,7 +172,7 @@ def _evaluate(self, v): # Return X as Image instance with the last two dimensions as *self.sz x = x.reshape((*sz_roll, *self.sz)) - return x + return xp.asnumpy(x) def _evaluate_t(self, x): """ @@ -200,7 +200,13 @@ def _evaluate_t(self, x): pf = xp.concatenate((pf, pf.conjugate()), axis=2) # evaluate radial integral using the Gauss-Legendre quadrature rule - pf = pf * (xp.array(self._precomp["gl_weights"]) * xp.array(self._precomp["gl_nodes"]))[None, :, None] + pf = ( + pf + * ( + xp.array(self._precomp["gl_weights"]) + * xp.array(self._precomp["gl_nodes"]) + )[None, :, None] + ) # 1D FFT on the angular dimension for each concentric circle pf = 2 * xp.pi / (2 * n_theta) * fft.fft(pf) @@ -213,7 +219,9 @@ def _evaluate_t(self, x): idx = ind + xp.arange(self.k_max[0]) # include the normalization factor of angular part into radial part - radial_norm = xp.array(self._precomp["radial"] / np.expand_dims(self.angular_norms, 1)) + radial_norm = xp.array( + self._precomp["radial"] / np.expand_dims(self.angular_norms, 1) + ) v[:, self._zero_angular_inds] = pf[:, :, 0].real @ radial_norm[idx].T ind = ind + idx.size From 8a6b4c4ee280c4f8470ea851ef3670a3707cb394 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 11 Jun 2024 11:01:51 -0400 Subject: [PATCH 048/139] downsample return --- src/aspire/image/image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index d1f9dc8c04..cbe0bbf07d 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -400,7 +400,7 @@ def downsample(self, ds_res): out = fft.centered_ifft2(crop_fx).real * (ds_res**2 / self.resolution**2) out = xp.asnumpy(out) - return self.__class__(np.array(out.get())).stack_reshape(original_stack_shape) + return self.__class__(out).stack_reshape(original_stack_shape) def filter(self, filter): """ From 81ba7afa7553eefd90fa3c6c2448d517f188f3fa Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 11 Jun 2024 11:43:52 -0400 Subject: [PATCH 049/139] remove unnecessary xp.array --- src/aspire/basis/ffb_2d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 996e44f95a..4971b44f77 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -154,7 +154,7 @@ def _evaluate(self, v): ind_pos = ind_pos + 2 * xp.array(self.k_max[ell]) # 1D inverse FFT in the degree of polar angle - pf = 2 * xp.pi * fft.ifft(xp.asarray(pf), axis=1) + pf = 2 * xp.pi * fft.ifft(pf, axis=1) # Only need "positive" frequencies. 
hsize = int(pf.shape[1] / 2) From 325129fae9b89f152c2f0a134ae10a11b010bd7e Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 11 Jun 2024 15:24:56 -0400 Subject: [PATCH 050/139] convert pf to complex --- src/aspire/basis/ffb_2d.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 4971b44f77..821800ee54 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -167,7 +167,9 @@ def _evaluate(self, v): # perform inverse non-uniformly FFT transform back to 2D coordinate basis freqs = m_reshape(self._precomp["freqs"], (2, n_r * n_theta)) - x = 2 * anufft(pf, 2 * pi * freqs, self.sz, real=True) + x = 2 * anufft( + pf.astype(complex_type(self.dtype)), 2 * pi * freqs, self.sz, real=True + ) # Return X as Image instance with the last two dimensions as *self.sz x = x.reshape((*sz_roll, *self.sz)) From af6d519655003dba8ee927c01f48f24c6718ae1e Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 08:59:56 -0400 Subject: [PATCH 051/139] precompute radial_norm and gl_weighted_nodes in build. --- src/aspire/basis/ffb_2d.py | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 821800ee54..891ab2600d 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -58,6 +58,16 @@ def _build(self): # precompute the basis functions in 2D grids self._precomp = self._precomp() + # include the normalization factor of angular part into radial part + self.radial_norm = xp.array(self._precomp["radial"]) / xp.array( + np.expand_dims(self.angular_norms, 1) + ) + + # precompute weighted nodes + self.gl_weighted_nodes = xp.array(self._precomp["gl_weights"]) * xp.array( + self._precomp["gl_nodes"] + ) + def _precomp(self): """ Precomute the basis functions on a polar Fourier grid @@ -123,11 +133,7 @@ def _evaluate(self, v): idx = ind + xp.arange(self.k_max[0], dtype=int) - # include the normalization factor of angular part into radial part - radial_norm = xp.array(self._precomp["radial"]) / xp.array( - np.expand_dims(self.angular_norms, 1) - ) - pf[:, 0, :] = v[:, xp.array(self._zero_angular_inds)] @ radial_norm[idx] + pf[:, 0, :] = v[:, xp.array(self._zero_angular_inds)] @ self.radial_norm[idx] ind = ind + idx.size ind_pos = ind @@ -142,7 +148,7 @@ def _evaluate(self, v): if np.mod(ell, 2) == 1: v_ell = 1j * v_ell - pf_ell = v_ell @ radial_norm[idx] + pf_ell = v_ell @ self.radial_norm[idx] pf[:, ell, :] = pf_ell if np.mod(ell, 2) == 0: @@ -159,9 +165,7 @@ def _evaluate(self, v): # Only need "positive" frequencies. 
hsize = int(pf.shape[1] / 2) pf = pf[:, 0:hsize, :] - pf *= ( - xp.array(self._precomp["gl_weights"]) * xp.array(self._precomp["gl_nodes"]) - )[None, None, :] + pf *= self.gl_weighted_nodes[None, None, :] pf = pf.reshape(n_data, n_r * n_theta) # perform inverse non-uniformly FFT transform back to 2D coordinate basis @@ -202,13 +206,7 @@ def _evaluate_t(self, x): pf = xp.concatenate((pf, pf.conjugate()), axis=2) # evaluate radial integral using the Gauss-Legendre quadrature rule - pf = ( - pf - * ( - xp.array(self._precomp["gl_weights"]) - * xp.array(self._precomp["gl_nodes"]) - )[None, :, None] - ) + pf *= self.gl_weighted_nodes[None, :, None] # 1D FFT on the angular dimension for each concentric circle pf = 2 * xp.pi / (2 * n_theta) * fft.fft(pf) @@ -221,10 +219,8 @@ def _evaluate_t(self, x): idx = ind + xp.arange(self.k_max[0]) # include the normalization factor of angular part into radial part - radial_norm = xp.array( - self._precomp["radial"] / np.expand_dims(self.angular_norms, 1) - ) - v[:, self._zero_angular_inds] = pf[:, :, 0].real @ radial_norm[idx].T + + v[:, self._zero_angular_inds] = pf[:, :, 0].real @ self.radial_norm[idx].T ind = ind + idx.size ind_pos = ind @@ -233,7 +229,7 @@ def _evaluate_t(self, x): idx_pos = ind_pos + xp.arange(self.k_max[ell]) idx_neg = idx_pos + self.k_max[ell] - v_ell = pf[:, :, ell] @ radial_norm[idx].T + v_ell = pf[:, :, ell] @ self.radial_norm[idx].T if np.mod(ell, 2) == 0: v_pos = v_ell.real From a98e00b8e9a321ef61beb93550b79444217cc9c3 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 09:07:46 -0400 Subject: [PATCH 052/139] remove comment --- src/aspire/basis/ffb_2d.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 891ab2600d..1c7ed8cad1 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -218,8 +218,6 @@ def _evaluate_t(self, x): ind = 0 idx = ind + xp.arange(self.k_max[0]) - # include the normalization factor of angular part into radial part - v[:, self._zero_angular_inds] = pf[:, :, 0].real @ self.radial_norm[idx].T ind = ind + idx.size From 92c61f29cc009ee365be2af818f9ff00f35c8212 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 10:42:35 -0400 Subject: [PATCH 053/139] use asarray --- src/aspire/basis/ffb_2d.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 1c7ed8cad1..f44e959149 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -59,12 +59,12 @@ def _build(self): self._precomp = self._precomp() # include the normalization factor of angular part into radial part - self.radial_norm = xp.array(self._precomp["radial"]) / xp.array( + self.radial_norm = xp.asarray(self._precomp["radial"]) / xp.asarray( np.expand_dims(self.angular_norms, 1) ) # precompute weighted nodes - self.gl_weighted_nodes = xp.array(self._precomp["gl_weights"]) * xp.array( + self.gl_weighted_nodes = xp.asarray(self._precomp["gl_weights"]) * xp.asarray( self._precomp["gl_nodes"] ) @@ -115,7 +115,7 @@ def _evaluate(self, v): coordinate basis. This is Image instance with resolution of `self.sz` and the first dimension correspond to remaining dimension of `v`. 
""" - v = xp.array(v) + v = xp.asarray(v) sz_roll = v.shape[:-1] v = v.reshape(-1, self.count) @@ -133,7 +133,7 @@ def _evaluate(self, v): idx = ind + xp.arange(self.k_max[0], dtype=int) - pf[:, 0, :] = v[:, xp.array(self._zero_angular_inds)] @ self.radial_norm[idx] + pf[:, 0, :] = v[:, xp.asarray(self._zero_angular_inds)] @ self.radial_norm[idx] ind = ind + idx.size ind_pos = ind @@ -141,7 +141,7 @@ def _evaluate(self, v): for ell in range(1, self.ell_max + 1): idx = ind + xp.arange(self.k_max[ell], dtype=int) idx_pos = ind_pos + xp.arange(self.k_max[ell], dtype=int) - idx_neg = idx_pos + xp.array(self.k_max[ell]) + idx_neg = idx_pos + xp.asarray(self.k_max[ell]) v_ell = (v[:, idx_pos] - 1j * v[:, idx_neg]) / 2.0 @@ -157,7 +157,7 @@ def _evaluate(self, v): pf[:, 2 * n_theta - ell, :] = -pf_ell.conjugate() ind = ind + idx.size - ind_pos = ind_pos + 2 * xp.array(self.k_max[ell]) + ind_pos = ind_pos + 2 * xp.asarray(self.k_max[ell]) # 1D inverse FFT in the degree of polar angle pf = 2 * xp.pi * fft.ifft(pf, axis=1) @@ -199,7 +199,7 @@ def _evaluate_t(self, x): n_images = x.shape[0] # resamping x in a polar Fourier gird using nonuniform discrete Fourier transform - pf = nufft(xp.array(x), 2 * pi * freqs) + pf = nufft(xp.asarray(x), 2 * pi * freqs) pf = pf.reshape(n_images, n_r, n_theta) # Recover "negative" frequencies from "positive" half plane. From 030062c1bad6af15be6161992f0b1d027717fdd8 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 13:36:16 -0400 Subject: [PATCH 054/139] Remove cupy.fill culprit. un-cupy indices. --- src/aspire/basis/ffb_2d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index f44e959149..116e009757 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -141,7 +141,7 @@ def _evaluate(self, v): for ell in range(1, self.ell_max + 1): idx = ind + xp.arange(self.k_max[ell], dtype=int) idx_pos = ind_pos + xp.arange(self.k_max[ell], dtype=int) - idx_neg = idx_pos + xp.asarray(self.k_max[ell]) + idx_neg = idx_pos + self.k_max[ell] v_ell = (v[:, idx_pos] - 1j * v[:, idx_neg]) / 2.0 @@ -157,7 +157,7 @@ def _evaluate(self, v): pf[:, 2 * n_theta - ell, :] = -pf_ell.conjugate() ind = ind + idx.size - ind_pos = ind_pos + 2 * xp.asarray(self.k_max[ell]) + ind_pos = ind_pos + 2 * self.k_max[ell] # 1D inverse FFT in the degree of polar angle pf = 2 * xp.pi * fft.ifft(pf, axis=1) From 58838e3b3f461c716f39f0c4f62f4a01e480d5ef Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 14:10:48 -0400 Subject: [PATCH 055/139] cupy.fill culprit in fle_2d. sparse indices. 
--- src/aspire/basis/fle_2d_utils.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/aspire/basis/fle_2d_utils.py b/src/aspire/basis/fle_2d_utils.py index 23d1441a68..33f237165e 100644 --- a/src/aspire/basis/fle_2d_utils.py +++ b/src/aspire/basis/fle_2d_utils.py @@ -44,9 +44,9 @@ def precomp_transform_complex_to_real(ells): """ count = len(ells) num_nonzero = np.sum(ells == 0) + 2 * np.sum(ells != 0) - idx = xp.zeros(num_nonzero, dtype=int) - jdx = xp.zeros(num_nonzero, dtype=int) - vals = xp.zeros(num_nonzero, dtype=np.complex128) + idx = np.zeros(num_nonzero, dtype=int) + jdx = np.zeros(num_nonzero, dtype=int) + vals = np.zeros(num_nonzero, dtype=np.complex128) k = 0 for i in range(count): @@ -86,7 +86,11 @@ def precomp_transform_complex_to_real(ells): jdx[k] = i + 1 k = k + 1 - A = sparse.csr_matrix((vals, (idx, jdx)), shape=(count, count), dtype=np.complex128) + A = sparse.csr_matrix( + (xp.asarray(vals), (xp.asarray(idx), xp.asarray(jdx))), + shape=(count, count), + dtype=np.complex128, + ) return A.conjugate() From 03697ff0a596c4a82d149e45015d9d24342ad265 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 13 Jun 2024 10:58:04 -0400 Subject: [PATCH 056/139] bare min vol hack --- src/aspire/volume/volume.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/aspire/volume/volume.py b/src/aspire/volume/volume.py index b6c100db36..7883f59190 100644 --- a/src/aspire/volume/volume.py +++ b/src/aspire/volume/volume.py @@ -475,15 +475,16 @@ def downsample(self, ds_res, mask=None): v = self.stack_reshape(-1) # take 3D Fourier transform of each volume in the stack - fx = fft.fftshift(fft.fftn(v._data, axes=(1, 2, 3))) + fx = xp.asnumpy(fft.fftshift(fft.fftn(xp.asarray(v._data), axes=(1, 2, 3)))) # crop each volume to the desired resolution in frequency space crop_fx = ( np.array([crop_pad_3d(fx[i, :, :, :], ds_res) for i in range(self.n_vols)]) * mask ) # inverse Fourier transform of each volume - out = fft.ifftn(fft.ifftshift(crop_fx), axes=(1, 2, 3)) * ( - ds_res**3 / self.resolution**3 + out = xp.asnumpy( + fft.ifftn(fft.ifftshift(xp.asarray(crop_fx)), axes=(1, 2, 3)) + * (ds_res**3 / self.resolution**3) ) # returns a new Volume object return self.__class__( From eceaf2547ae72b41698df02cf7ebeba4a9a713c7 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 13 Jun 2024 11:33:04 -0400 Subject: [PATCH 057/139] bare min ffb3d hacks [skip ci] --- src/aspire/basis/ffb_3d.py | 52 ++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/src/aspire/basis/ffb_3d.py b/src/aspire/basis/ffb_3d.py index 6362a9a703..1ac5fd62ff 100644 --- a/src/aspire/basis/ffb_3d.py +++ b/src/aspire/basis/ffb_3d.py @@ -6,6 +6,7 @@ from aspire.basis import FBBasis3D from aspire.basis.basis_utils import lgwt, norm_assoc_legendre, sph_bessel from aspire.nufft import anufft, nufft +from aspire.numeric import xp from aspire.utils.matlab_compat import m_flatten, m_reshape logger = logging.getLogger(__name__) @@ -146,10 +147,10 @@ def _precomp(self): ) return { - "radial_wtd": radial_wtd, - "ang_phi_wtd_even": ang_phi_wtd_even, - "ang_phi_wtd_odd": ang_phi_wtd_odd, - "ang_theta_wtd": ang_theta_wtd, + "radial_wtd": xp.asarray(radial_wtd), + "ang_phi_wtd_even": [xp.asarray(x) for x in ang_phi_wtd_even], + "ang_phi_wtd_odd": [xp.asarray(x) for x in ang_phi_wtd_odd], + "ang_theta_wtd": xp.asarray(ang_theta_wtd), "fourier_pts": fourier_pts, } @@ -163,6 +164,7 @@ def _evaluate(self, v): coordinate basis. 
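The `downsample` change above brackets the FFT calls with `xp.asarray`/`xp.asnumpy` so that the transform runs on the device when the cupy backends are configured, while callers keep passing and receiving host arrays. The same bracketing pattern reduced to a few lines (requires ASPIRE; falls back to NumPy/SciPy when cupy is not configured):

    import numpy as np

    from aspire.numeric import fft, xp

    vol = np.random.rand(8, 8, 8).astype(np.float64)

    # Host array in, host array out; fftn/fftshift run on the GPU only when the
    # `numeric` and `fft` backends are set to cupy.
    vol_f = xp.asnumpy(fft.fftshift(fft.fftn(xp.asarray(vol), axes=(0, 1, 2))))
    assert vol_f.shape == vol.shape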
This is an array whose last three dimensions equal `self.sz` and the remaining dimensions correspond to `v`. """ + v = xp.asarray(v) # roll dimensions of v sz_roll = v.shape[:-1] v = v.reshape((-1, self.count)) @@ -175,7 +177,7 @@ def _evaluate(self, v): # number of 3D image samples n_data = v.shape[0] - u_even = np.zeros( + u_even = xp.zeros( ( n_r, int(2 * self.ell_max + 1), @@ -184,7 +186,7 @@ def _evaluate(self, v): ), dtype=v.dtype, ) - u_odd = np.zeros( + u_odd = xp.zeros( (n_r, int(2 * self.ell_max + 1), n_data, int(np.ceil(self.ell_max / 2))), dtype=v.dtype, ) @@ -216,10 +218,10 @@ def _evaluate(self, v): int((ell - 1) / 2), ] = v_ell - u_even = np.transpose(u_even, (3, 0, 1, 2)) - u_odd = np.transpose(u_odd, (3, 0, 1, 2)) - w_even = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) - w_odd = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) + u_even = xp.transpose(u_even, (3, 0, 1, 2)) + u_odd = xp.transpose(u_odd, (3, 0, 1, 2)) + w_even = xp.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) + w_odd = xp.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) # evaluate the phi parts for m in range(0, self.ell_max + 1): @@ -252,8 +254,8 @@ def _evaluate(self, v): w_even[:, :, :, self.ell_max + sgn * m] = w_m_even w_odd[:, :, :, self.ell_max + sgn * m] = w_m_odd - w_even = np.transpose(w_even, (3, 0, 1, 2)) - w_odd = np.transpose(w_odd, (3, 0, 1, 2)) + w_even = xp.transpose(w_even, (3, 0, 1, 2)) + w_odd = xp.transpose(w_odd, (3, 0, 1, 2)) u_even = w_even u_odd = w_odd @@ -266,7 +268,7 @@ def _evaluate(self, v): pf = w_even + 1j * w_odd pf = m_reshape(pf, (n_theta * n_phi * n_r, n_data)) - pf = np.moveaxis(pf, 0, -1) + pf = xp.moveaxis(pf, 0, -1) # perform inverse non-uniformly FFT transformation back to 3D rectangular coordinates freqs = m_reshape(self._precomp["fourier_pts"], (3, n_r * n_theta * n_phi)) @@ -275,7 +277,7 @@ def _evaluate(self, v): # Roll, return the x with the last three dimensions as self.sz # Higher dimensions should be like v. x = x.reshape((*sz_roll, *self.sz)) - return x + return xp.asnumpy(x) def _evaluate_t(self, x): """ @@ -288,6 +290,7 @@ def _evaluate_t(self, x): `self.count` and whose remaining dimensions correspond to higher dimensions of `x`. 
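The `m_reshape`/`m_flatten` helpers used throughout these hunks come from `aspire.utils.matlab_compat` and, as the module name suggests, follow MATLAB-style column-major ordering rather than NumPy's default row-major layout, which is why the device arrays above keep going through them. A quick illustration (requires ASPIRE; the column-major behaviour is inferred from the module's purpose rather than quoted from it):

    import numpy as np

    from aspire.utils.matlab_compat import m_flatten, m_reshape

    a = np.arange(6)

    print(a.reshape(2, 3))                  # row-major fill
    print(m_reshape(a, (2, 3)))             # column-major fill
    print(m_flatten(m_reshape(a, (2, 3))))  # round-trips back to 0..5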
""" + x = xp.asarray(x) # roll dimensions sz_roll = x.shape[:-3] x = x.reshape((-1, *self.sz)) @@ -303,20 +306,21 @@ def _evaluate_t(self, x): pf = m_reshape(pf.T, (n_theta, n_phi * n_r * n_data)) # evaluate the theta parts - u_even = self._precomp["ang_theta_wtd"].T @ np.real(pf) - u_odd = self._precomp["ang_theta_wtd"].T @ np.imag(pf) + tmp = self._precomp["ang_theta_wtd"].T + u_even = tmp @ xp.real(pf) + u_odd = tmp @ xp.imag(pf) u_even = m_reshape(u_even, (2 * self.ell_max + 1, n_phi, n_r, n_data)) u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi, n_r, n_data)) - u_even = np.transpose(u_even, (1, 2, 3, 0)) - u_odd = np.transpose(u_odd, (1, 2, 3, 0)) + u_even = xp.transpose(u_even, (1, 2, 3, 0)) + u_odd = xp.transpose(u_odd, (1, 2, 3, 0)) - w_even = np.zeros( + w_even = xp.zeros( (int(np.floor(self.ell_max / 2) + 1), n_r, 2 * self.ell_max + 1, n_data), dtype=x.dtype, ) - w_odd = np.zeros( + w_odd = xp.zeros( (int(np.ceil(self.ell_max / 2)), n_r, 2 * self.ell_max + 1, n_data), dtype=x.dtype, ) @@ -351,11 +355,11 @@ def _evaluate_t(self, x): end = np.size(w_odd, 0) w_odd[end - n_odd_ell : end, :, self.ell_max + sgn * m, :] = w_m_odd - w_even = np.transpose(w_even, (1, 2, 3, 0)) - w_odd = np.transpose(w_odd, (1, 2, 3, 0)) + w_even = xp.transpose(w_even, (1, 2, 3, 0)) + w_odd = xp.transpose(w_odd, (1, 2, 3, 0)) # evaluate the radial parts - v = np.zeros((n_data, self.count), dtype=x.dtype) + v = xp.zeros((n_data, self.count), dtype=x.dtype) for ell in range(0, self.ell_max + 1): k_max_ell = self.k_max[ell] radial_wtd = self._precomp["radial_wtd"][:, 0:k_max_ell, ell] @@ -388,4 +392,4 @@ def _evaluate_t(self, x): # Roll dimensions, last dimension should be self.count, # Higher dimensions like x. v = v.reshape((*sz_roll, self.count)) - return v + return xp.asnumpy(v) From 1aae0727909ff9ff569cdc1078bedc8bc50f21e1 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 13 Jun 2024 14:58:00 -0400 Subject: [PATCH 058/139] better style --- src/aspire/basis/ffb_3d.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/aspire/basis/ffb_3d.py b/src/aspire/basis/ffb_3d.py index 1ac5fd62ff..5740900a34 100644 --- a/src/aspire/basis/ffb_3d.py +++ b/src/aspire/basis/ffb_3d.py @@ -218,8 +218,8 @@ def _evaluate(self, v): int((ell - 1) / 2), ] = v_ell - u_even = xp.transpose(u_even, (3, 0, 1, 2)) - u_odd = xp.transpose(u_odd, (3, 0, 1, 2)) + u_even = u_even.transpose((3, 0, 1, 2)) + u_odd = u_odd.transpose((3, 0, 1, 2)) w_even = xp.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) w_odd = xp.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1), dtype=v.dtype) @@ -254,8 +254,8 @@ def _evaluate(self, v): w_even[:, :, :, self.ell_max + sgn * m] = w_m_even w_odd[:, :, :, self.ell_max + sgn * m] = w_m_odd - w_even = xp.transpose(w_even, (3, 0, 1, 2)) - w_odd = xp.transpose(w_odd, (3, 0, 1, 2)) + w_even = w_even.transpose((3, 0, 1, 2)) + w_odd = w_odd.transpose((3, 0, 1, 2)) u_even = w_even u_odd = w_odd @@ -307,14 +307,14 @@ def _evaluate_t(self, x): # evaluate the theta parts tmp = self._precomp["ang_theta_wtd"].T - u_even = tmp @ xp.real(pf) - u_odd = tmp @ xp.imag(pf) + u_even = tmp @ pf.real + u_odd = tmp @ pf.imag u_even = m_reshape(u_even, (2 * self.ell_max + 1, n_phi, n_r, n_data)) u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi, n_r, n_data)) - u_even = xp.transpose(u_even, (1, 2, 3, 0)) - u_odd = xp.transpose(u_odd, (1, 2, 3, 0)) + u_even = u_even.transpose((1, 2, 3, 0)) + u_odd = u_odd.transpose((1, 2, 3, 0)) w_even = xp.zeros( 
(int(np.floor(self.ell_max / 2) + 1), n_r, 2 * self.ell_max + 1, n_data), @@ -355,8 +355,8 @@ def _evaluate_t(self, x): end = np.size(w_odd, 0) w_odd[end - n_odd_ell : end, :, self.ell_max + sgn * m, :] = w_m_odd - w_even = xp.transpose(w_even, (1, 2, 3, 0)) - w_odd = xp.transpose(w_odd, (1, 2, 3, 0)) + w_even = w_even.transpose((1, 2, 3, 0)) + w_odd = w_odd.transpose((1, 2, 3, 0)) # evaluate the radial parts v = xp.zeros((n_data, self.count), dtype=x.dtype) From d63b1dc909673a55378bbca2dbb789b8ff1b7262 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 13 Jun 2024 15:11:04 -0400 Subject: [PATCH 059/139] last cupy fill --- src/aspire/basis/fle_2d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 4278331e1a..df1d66c608 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -452,7 +452,7 @@ def _create_basis_functions(self): """ Generate the actual basis functions as Python lambda operators """ - norm_constants = xp.zeros(self.count) + norm_constants = np.zeros(self.count) basis_functions = [None] * self.count for i in range(self.count): # parameters defining the basis function: bessel order and which bessel root @@ -481,7 +481,7 @@ def _create_basis_functions(self): norm_constants[i] = c - self.norm_constants = norm_constants + self.norm_constants = xp.asarray(norm_constants) self.basis_functions = basis_functions def _evaluate(self, coefs): From b16fa015dd0691adfee557442c0c3188173f3a11 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 18 Jun 2024 09:39:42 -0400 Subject: [PATCH 060/139] revert config to numpy/scipy --- src/aspire/config_default.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/config_default.yaml b/src/aspire/config_default.yaml index fed4cea50a..def78983c0 100644 --- a/src/aspire/config_default.yaml +++ b/src/aspire/config_default.yaml @@ -1,9 +1,9 @@ version: 0.12.3 common: # numeric module to use - one of numpy/cupy - numeric: cupy + numeric: numpy # fft backend to use - one of pyfftw/scipy/cupy/mkl - fft: cupy + fft: scipy # Set cache directory for ASPIRE example data. # By default the cache location will be set by pooch.os_cache(), From 41e3208572749ffc655ed1584e86a5e20a8f65a7 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 18 Jun 2024 16:13:28 -0400 Subject: [PATCH 061/139] fft host array preservation --- src/aspire/numeric/cupy_fft.py | 45 +++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 29939e504c..7d73367bc7 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -1,9 +1,36 @@ +import functools + import cupy as cp import cupyx.scipy.fft as cufft from aspire.numeric.base_fft import FFT +def _preserve_host(func): + """ + Method decorator that returns a numpy/cupy array result when passed a numpy/cupy array input. + + This improves the flexibility of our FFT wrappers by allowing for incremental code changes. 
+ """ + + @functools.wraps(func) # Pass metadata (eg name and doctrings) from `func` + def wrapper(self, x, *args, **kwargs): + + _host = False + if not isinstance(x, cp.ndarray): + _host = True + x = cp.asarray(x) + + res = func(self, x, *args, **kwargs) + + if _host: + res = res.get() + + return res + + return wrapper + + class CupyFFT(FFT): """ Define a unified wrapper class for Cupy FFT functions @@ -11,32 +38,42 @@ class CupyFFT(FFT): To be consistent with Scipy and Pyfftw, not all arguments are included. """ + @_preserve_host def fft(self, x, axis=-1, workers=-1): return cp.fft.fft(x, axis=axis) + @_preserve_host def ifft(self, x, axis=-1, workers=-1): return cp.fft.ifft(x, axis=axis) + @_preserve_host def fft2(self, x, axes=(-2, -1), workers=-1): return cp.fft.fft2(x, axes=axes) + @_preserve_host def ifft2(self, x, axes=(-2, -1), workers=-1): return cp.fft.ifft2(x, axes=axes) + @_preserve_host def fftn(self, x, axes=None, workers=-1): return cp.fft.fftn(x, axes=axes) + @_preserve_host def ifftn(self, x, axes=None, workers=-1): return cp.fft.ifftn(x, axes=axes) + @_preserve_host def fftshift(self, x, axes=None): return cp.fft.fftshift(x, axes=axes) + @_preserve_host def ifftshift(self, x, axes=None): return cp.fft.ifftshift(x, axes=axes) - def dct(self, *args, **kwargs): - return cufft.dct(*args, **kwargs) + @_preserve_host + def dct(self, x, **kwargs): + return cufft.dct(x, **kwargs) - def idct(self, *args, **kwargs): - return cufft.idct(*args, **kwargs) + @_preserve_host + def idct(self, x, **kwargs): + return cufft.idct(x, **kwargs) From ca657a7bab1e98cc392a814bebbea85a342125ef Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 09:29:34 -0400 Subject: [PATCH 062/139] interop crop_pad_2d --- src/aspire/utils/coor_trans.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index dfb1c630f1..771c5bd5af 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -369,6 +369,11 @@ def rots_to_clmatrix(rots, n_theta): def crop_pad_2d(im, size, fill_value=0): """ + Crop/pads `im` according to `size`. + + Padding will use `fill_value`. + Return's host/gpu array based on `im`. 
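With `_preserve_host` applied to every `CupyFFT` method above, callers that hand in NumPy arrays keep getting NumPy arrays back, so code paths that have not yet been migrated to `xp` continue to work unchanged. A small usage sketch (requires ASPIRE; the assertion holds whether the `fft` backend is cupy or scipy):

    import numpy as np

    from aspire.numeric import fft

    x = np.random.rand(16, 16).astype(np.float64)

    y = fft.fft2(x)                   # runs on the GPU when the cupy backend is active
    assert isinstance(y, np.ndarray)  # ...but a host array comes back either way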
+ :param im: A >=2-dimensional numpy array :param size: Integer size of cropped/padded output :return: A numpy array of shape (..., size, size) @@ -387,8 +392,16 @@ def crop_pad_2d(im, size, fill_value=0): # Determine shape shape = list(im.shape[:-2]) shape.extend([size, size]) - # ensure that we return in the same dtype as the input - to_return = xp.full(shape, fill_value, dtype=im.dtype) + + # Ensure that we return in the same dtype as the input + _full = np.full # Default to numpy array + if isinstance(im, xp.ndarray): + # Use cupy when `im` _and_ xp are cupy ndarray + # Avoids having to handle when cupy is not installed + _full = xp.full + + to_return = _full(shape, fill_value, dtype=im.dtype) + # when padding, start_x and start_y are negative since size is larger # than im_x and im_y; the below line calculates where the original image # is placed in relation to the (now-larger) box size From dbe66e5e7f9a6fa97ba81829155e31608d7792f1 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 10:11:49 -0400 Subject: [PATCH 063/139] interop fle radial convolve --- src/aspire/basis/fle_2d.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index df1d66c608..332b84f64d 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -721,10 +721,12 @@ def radial_convolve(self, coefs, radial_img): "`radial_convolve` currently only implemented for 1D stacks." ) - coefs = coefs.asnumpy() + # Potentially migrate to GPU + coefs = xp.asarray(coefs.asnumpy()) + radial_img = xp.asarray(radial_img) num_img = coefs.shape[0] - coefs_conv = np.zeros(coefs.shape) + coefs_conv = xp.zeros(coefs.shape) # Convert to internal FLE indices ordering coefs = coefs[..., self._fb_to_fle_indices] @@ -736,25 +738,26 @@ def radial_convolve(self, coefs, radial_img): weights = self._radial_convolve_weights(b) b = weights / (self.h**2) b = b.reshape(self.count) - coefs_conv[k, :] = np.real(self.c2r @ (b * (self.r2c @ _coefs).flatten())) + coefs_conv[k, :] = (self.c2r @ (b * (self.r2c @ _coefs).flatten())).real # Convert from internal FLE ordering to FB convention coefs_conv = coefs_conv[..., self._fle_to_fb_indices] - return Coef(self, coefs_conv) + # Return as Coef on host + return Coef(self, xp.asnumpy(coefs_conv)) def _radial_convolve_weights(self, b): """ Helper function for step 3 of convolving with a radial function. 
""" - b = np.squeeze(b) - b = np.array(b) + b = xp.squeeze(b) + b = xp.array(b) # implies copy if self.num_interp > self.num_radial_nodes: b = fft.dct(b, axis=0, type=2) / (2 * self.num_radial_nodes) - bz = np.zeros(b.shape) - b = np.concatenate((b, bz), axis=0) + bz = xp.zeros(b.shape) + b = xp.concatenate((b, bz), axis=0) b = fft.idct(b, axis=0, type=2) * 2 * b.shape[0] - a = np.zeros(self.count, dtype=np.float64) + a = xp.zeros(self.count, dtype=np.float64) y = [None] * (self.ell_p_max + 1) for i in range(self.ell_p_max + 1): y[i] = (self.A3[i] @ b[:, 0]).flatten() From 8e2f20033068167de4dc396fbc1ef824d06d7d02 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 14:05:33 -0400 Subject: [PATCH 064/139] cleanup --- src/aspire/utils/coor_trans.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index 771c5bd5af..ef7857c994 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -401,7 +401,7 @@ def crop_pad_2d(im, size, fill_value=0): _full = xp.full to_return = _full(shape, fill_value, dtype=im.dtype) - + # when padding, start_x and start_y are negative since size is larger # than im_x and im_y; the below line calculates where the original image # is placed in relation to the (now-larger) box size From 57a3679f0a0aaf6fba5a8e8638d8a00dfe8459e2 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 14:06:43 -0400 Subject: [PATCH 065/139] remove bbenchmark code, hackathon over --- bbenchmark/bbenchmark.py | 58 ------------------------------ bbenchmark/benchmark_gpu0.pkl | Bin 307 -> 0 bytes bbenchmark/benchmark_host.pkl | Bin 307 -> 0 bytes bbenchmark/plot_bb.py | 64 ---------------------------------- 4 files changed, 122 deletions(-) delete mode 100644 bbenchmark/bbenchmark.py delete mode 100644 bbenchmark/benchmark_gpu0.pkl delete mode 100644 bbenchmark/benchmark_host.pkl delete mode 100644 bbenchmark/plot_bb.py diff --git a/bbenchmark/bbenchmark.py b/bbenchmark/bbenchmark.py deleted file mode 100644 index 01aac6e7eb..0000000000 --- a/bbenchmark/bbenchmark.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import pickle -from pprint import pprint -from time import perf_counter, time - -import matplotlib.pyplot as plt -import numpy as np - -from aspire.basis import FFBBasis2D, FLEBasis2D -from aspire.downloader import emdb_2660 -from aspire.noise import WhiteNoiseAdder -from aspire.source import ArrayImageSource, Simulation - -# Download and cache volume map -vol = emdb_2660().astype(np.float64) # doubles -cached_image_fn = "simulated_images.npy" - -if os.path.exists(cached_image_fn): - print(f"Loading cached image source from {cached_image_fn}.") - sim = ArrayImageSource(np.load(cached_image_fn)) -else: - print("Generating Simulated Datatset") - sim = Simulation( - n=512, C=1, vols=vol, noise_adder=WhiteNoiseAdder.from_snr(0.1) - ).cache() - print(f"Saving to {cached_image_fn}") - np.save(cached_image_fn, sim.images[:].asnumpy()) - - -TIMES = {} -for L in [32, 64, 128, 256]: - print(f"Begin L={L}") - src = sim.downsample(L) - imgs = src.images[:] - TIMES[L] = {} - for basis_type in [FFBBasis2D, FLEBasis2D]: - # Construct basis - TIMES[L][basis_type.__name__] = {} - basis = basis_type(L, dtype=src.dtype) - - # Time expanding into basis - tic = perf_counter() - coef = basis.evaluate_t(imgs) - toc = perf_counter() - TIMES[L][basis_type.__name__]["evaluate_t"] = toc - tic - - # Time expanding back into images - tic = perf_counter() - _ = coef.evaluate() - toc = 
perf_counter() - TIMES[L][basis_type.__name__]["evaluate"] = toc - tic - - -pprint(TIMES) - - -with open(f"benchmark_{int(time())}.pkl", "wb") as fh: - pickle.dump(TIMES, fh) diff --git a/bbenchmark/benchmark_gpu0.pkl b/bbenchmark/benchmark_gpu0.pkl deleted file mode 100644 index e702dd442dced27acb26fcc3b3e395ce9f465585..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 307 zcmZo*nX19a00y;FG`tmnL=Tsno0C&wab~fR%M>s_wJb5GG_fQ#zGRBK{fB=m-#lPo z=;45g0>v(0*=`CnqZFvs#}!Fy28+7`_dcgM4hDt{R(A)1g!$SUKxL)g4nT7=m_P)J zyZseA`*#jt74}bVm#$9$s>oo2$TFJo@? J0igC$Jpf$(W#0e* diff --git a/bbenchmark/benchmark_host.pkl b/bbenchmark/benchmark_host.pkl deleted file mode 100644 index dc0dd2a1769fc52c9470986e74eb64864f59e7fe..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 307 zcmZo*nX19a00y;FG`tmnL=Tsno0C&wab~fR%M>s_wJb5GG_fQ#zGRBK{l>F_wm|hg z957L!*k;SlN}yONP^*tClGY3scLzS3waO9<3>mEM4m>kTmTLf&m3lh>&COr}5iIWZ zmm4fH4}ewJUv3m%3ovVBHPKy1(9iTm;ktG~fPnyM$W- zvToDPIg_qJbek#on}>jO`!X;hX?O6pRZ8-OC=s5uZo?jA?UmgRJ_s6sonHA^-k?wb IsJ&DV0PQbeIsgCw diff --git a/bbenchmark/plot_bb.py b/bbenchmark/plot_bb.py deleted file mode 100644 index 05f5350f4b..0000000000 --- a/bbenchmark/plot_bb.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import pickle -from pprint import pprint - -import matplotlib.pyplot as plt -import numpy as np - -host_fn = "benchmark_host.pkl" -gpu_fn = "benchmark_gpu0.pkl" - - -with open(host_fn, "rb") as fh: - host_times = pickle.load(fh) - -with open(gpu_fn, "rb") as fh: - gpu_times = pickle.load(fh) - -markers = {"FFBBasis2D": "8", "FLEBasis2D": "s"} - -# Evaluate_t -Ls = list(host_times.keys()) -for basis_type in markers.keys(): - plt.plot( - Ls, - [host_times[L][basis_type]["evaluate_t"] for L in Ls], - marker=markers[basis_type], - color="blue", - label=basis_type + "-host", - ) - plt.plot( - Ls, - [gpu_times[L][basis_type]["evaluate_t"] for L in Ls], - marker=markers[basis_type], - color="green", - label=basis_type + "-gpu", - ) -plt.title("Basis `evaluate_t` Permformance - Batch of 512 Images") -plt.xlabel("Image Pixel L (LxL)") -plt.ylabel("Time (seconds)") -plt.legend() -plt.savefig("evaluate_t.png") -plt.show() - -for basis_type in markers.keys(): - plt.plot( - Ls, - [host_times[L][basis_type]["evaluate"] for L in Ls], - marker=markers[basis_type], - color="blue", - label=basis_type + "-host", - ) - plt.plot( - Ls, - [gpu_times[L][basis_type]["evaluate"] for L in Ls], - marker=markers[basis_type], - color="green", - label=basis_type + "-gpu", - ) -plt.title("Basis `evaluate` Permformance - Batch of 512 Images") -plt.xlabel("Image Pixel L (LxL)") -plt.ylabel("Time (seconds)") -plt.legend() -plt.savefig("evaluate.png") -plt.show() From 07daa17e4e9e93dd9af7d0e1ac2cca13fbdde8a2 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 14:25:30 -0400 Subject: [PATCH 066/139] fix interop cp check --- src/aspire/nufft/__init__.py | 12 +++++++----- src/aspire/numeric/numpy.py | 9 +++++++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index f748a10d84..c2db4f2e8e 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -2,13 +2,15 @@ import numpy as np +from aspire import config +from aspire.utils import LogFilterByCount, complex_type, real_type + +cp = None try: import cupy as cp except ModuleNotFoundError: - cp = None + pass -from aspire import config -from aspire.utils import LogFilterByCount, complex_type, real_type logger 
= logging.getLogger(__name__) @@ -196,7 +198,7 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): adjoint = adjoint.real if real else adjoint - if not on_gpu: + if cp and not on_gpu: adjoint = adjoint.get() return adjoint @@ -257,7 +259,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): transform = transform.real if real else transform - if not on_gpu: + if cp and not on_gpu: transform = transform.get() return transform diff --git a/src/aspire/numeric/numpy.py b/src/aspire/numeric/numpy.py index 9367409c78..07627399f9 100644 --- a/src/aspire/numeric/numpy.py +++ b/src/aspire/numeric/numpy.py @@ -1,11 +1,16 @@ -import cupy as cp import numpy as np +cp = None +try: + import cupy as cp +except ModuleNotFoundError: + pass + class Numpy: @staticmethod def asnumpy(x): - if isinstance(x, cp.ndarray): + if cp and isinstance(x, cp.ndarray): x = x.get() return x From 1947c7d2c9ac86210093e56f8795592b91c21724 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 15:22:03 -0400 Subject: [PATCH 067/139] use cupy modes on ampere_gpu jobs --- .github/workflows/workflow.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index f2d5472e52..24528a6b72 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -148,6 +148,8 @@ jobs: echo "WORK_DIR=${WORK_DIR}" >> $GITHUB_ENV echo -e "ray:\n temp_dir: ${WORK_DIR}\n" > ${WORK_DIR}/config.yaml echo -e "common:\n cache_dir: ${CI_CACHE_DIR}\n" >> ${WORK_DIR}/config.yaml + echo -e " numeric: cupy\n" >> ${WORK_DIR}/config.yaml + echo -e " fft: cupy\n" >> ${WORK_DIR}/config.yaml echo "Log the config: ${WORK_DIR}/config.yaml" cat ${WORK_DIR}/config.yaml - name: Run From 8e60d46f347d03fca0773c00fb66f9eaf01d0499 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 15:24:25 -0400 Subject: [PATCH 068/139] ws cleanup in gha config gen --- .github/workflows/workflow.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index 24528a6b72..fce5a7f6d4 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -147,8 +147,8 @@ jobs: echo "Stash the WORK_DIR to GitHub env so we can clean it up later." echo "WORK_DIR=${WORK_DIR}" >> $GITHUB_ENV echo -e "ray:\n temp_dir: ${WORK_DIR}\n" > ${WORK_DIR}/config.yaml - echo -e "common:\n cache_dir: ${CI_CACHE_DIR}\n" >> ${WORK_DIR}/config.yaml - echo -e " numeric: cupy\n" >> ${WORK_DIR}/config.yaml + echo -e "common:\n cache_dir: ${CI_CACHE_DIR}" >> ${WORK_DIR}/config.yaml + echo -e " numeric: cupy" >> ${WORK_DIR}/config.yaml echo -e " fft: cupy\n" >> ${WORK_DIR}/config.yaml echo "Log the config: ${WORK_DIR}/config.yaml" cat ${WORK_DIR}/config.yaml From c7eb9dd4867f568bc1eb765a9796dd41895c914d Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 20 Jun 2024 17:14:46 -0400 Subject: [PATCH 069/139] remove older GPU environments. 
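The `fix interop cp check` commit above leans on a module-level guard so that cupy stays optional: `cp` is either the imported module or `None`, and every device-specific branch is gated on it. The same pattern in isolation (self-contained; `to_host` is just an illustrative name):

    cp = None
    try:
        import cupy as cp
    except ModuleNotFoundError:
        pass


    def to_host(arr):
        """Return a NumPy array no matter where `arr` currently lives."""
        if cp and isinstance(arr, cp.ndarray):
            arr = arr.get()  # device-to-host copy
        return arr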
--- pyproject.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fcc0f7cf4f..c9c25a9976 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,10 +61,6 @@ dependencies = [ "Source" = "https://github.com/ComputationalCryoEM/ASPIRE-Python" [project.optional-dependencies] -gpu-102 = ["cupy-cuda102", "cufinufft==1.3"] -gpu-110 = ["cupy-cuda110", "cufinufft==1.3"] -gpu-111 = ["cupy-cuda111", "cufinufft==1.3"] -gpu-11x = ["cupy-cuda11x", "cufinufft==1.3"] gpu-12x = ["cupy-cuda12x", "cufinufft==2.2.0"] dev = [ "black", From 8accd1f42b7597650d1c5f0d174ab80c47036260 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 21 Jun 2024 09:47:04 -0400 Subject: [PATCH 070/139] fle basis to mat xp conversion --- src/aspire/basis/fle_2d.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 332b84f64d..e82dbb5d2f 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -786,20 +786,26 @@ def filter_to_basis_mat(self, f, **kwargs): # get 2D grid in polar coordinate k_vals, wts = lgwt(n_k, 0, 0.5, dtype=self.dtype) - k, theta = np.meshgrid( - k_vals, np.arange(n_theta) * 2 * np.pi / (2 * n_theta), indexing="ij" + k, theta = xp.meshgrid( + xp.asarray(k_vals), + xp.arange(n_theta) * 2 * np.pi / (2 * n_theta), + indexing="ij", ) # Get function values in polar 2D grid and average out angle contribution # NOTE: should probably just let the ctf objects handle this... - omegax = k * np.cos(theta) - omegay = k * np.sin(theta) - omega = 2 * np.pi * np.vstack((omegax.flatten("C"), omegay.flatten("C"))) - - h_vals2d = h_fun(omega).reshape(n_k, n_theta).astype(self.dtype) - h_vals = np.sum(h_vals2d, axis=1) / n_theta + omegax = k * xp.cos(theta) + omegay = k * xp.sin(theta) + omega = 2 * xp.pi * xp.vstack((omegax.flatten("C"), omegay.flatten("C"))) + + h_vals2d = ( + xp.asarray(h_fun(omega)) + .reshape(n_k, n_theta) + .astype(self.dtype, copy=False) + ) + h_vals = xp.sum(h_vals2d, axis=1) / n_theta - h_basis = np.zeros(self.count, dtype=self.dtype) + h_basis = xp.zeros(self.count, dtype=self.dtype) # For now we just need to handle 1D (stack of one ctf) for j in range(self.ell_p_max + 1): h_basis[self.idx_list[j]] = self.A3[j] @ h_vals @@ -807,4 +813,4 @@ def filter_to_basis_mat(self, f, **kwargs): # Convert from internal FLE ordering to FB convention h_basis = h_basis[self._fle_to_fb_indices] - return DiagMatrix(h_basis) + return DiagMatrix(xp.asnumpy(h_basis)) From 7b3b080cc1f9fb38e9b9948c4fc2ce11ca30306d Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 21 Jun 2024 10:14:39 -0400 Subject: [PATCH 071/139] better eigsh sanity check --- tests/test_numeric_sparse.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py index 288e41a176..5a8227fe47 100644 --- a/tests/test_numeric_sparse.py +++ b/tests/test_numeric_sparse.py @@ -47,7 +47,8 @@ def test_eigsh(backends): """ xp, sparse = backends - A = xp.eye(1234) + n = 123 + A = xp.diag(xp.arange(1, n + 1, dtype=np.float64)) - lamb, _ = sparse.linalg.eigsh(A) - np.testing.assert_allclose(xp.asnumpy(lamb), 1.0) + lamb, _ = sparse.linalg.eigsh(A, k=1) + np.testing.assert_allclose(xp.asnumpy(lamb), n) From ac63b7cdc46e7df4bc11aa7346effd92783a96e7 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 24 Jun 2024 10:50:24 -0400 Subject: [PATCH 072/139] cupy fft accuracy casting work around --- src/aspire/numeric/cupy_fft.py 
| 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 7d73367bc7..043780c7a7 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -2,6 +2,7 @@ import cupy as cp import cupyx.scipy.fft as cufft +import numpy as np from aspire.numeric.base_fft import FFT @@ -16,6 +17,17 @@ def _preserve_host(func): @functools.wraps(func) # Pass metadata (eg name and doctrings) from `func` def wrapper(self, x, *args, **kwargs): + # CuPy's single precision FFT appears to be too inaccurate for + # many of our unit tests, so the signal is upcast and recast + # on return. + _singles = False + if x.dtype == np.float32: + _singles = True + x = x.astype(np.float64) + elif x.dtype == np.complex64: + _singles = True + x = x.astype(np.complex128) + _host = False if not isinstance(x, cp.ndarray): _host = True @@ -26,6 +38,12 @@ def wrapper(self, x, *args, **kwargs): if _host: res = res.get() + # Recast if needed. + if _singles and res.dtype == np.float64: + res = res.astype(np.float32) + elif _singles and res.dtype == np.complex128: + res = res.astype(np.complex64) + return res return wrapper From 47ee759e5880c882f1746f6699ca6de469f8f385 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 24 Jun 2024 10:51:01 -0400 Subject: [PATCH 073/139] some numpy/cupy interop tweaks --- src/aspire/image/image.py | 15 ++++++++------- src/aspire/volume/volume.py | 8 ++++---- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index cbe0bbf07d..f4d89eb3a5 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -413,17 +413,18 @@ def filter(self, filter): im = self.stack_reshape(-1) - filter_values = filter.evaluate_grid(self.resolution) + filter_values = xp.asarray(filter.evaluate_grid(self.resolution)) - im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(im._data))) + im_f = fft.centered_fft2(xp.asarray(im._data)) # TODO: why are these different? Doesn't the broadcast work? 
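The casting work-around above exists because cupy's single-precision FFTs were not accurate enough for the existing unit tests; singles are promoted to doubles before the transform and demoted again on the way out. The same bracketing, sketched with NumPy's FFT so it runs anywhere (`fft2_doubles` is an illustrative stand-in, not the ASPIRE wrapper):

    import numpy as np


    def fft2_doubles(x):
        recast = None
        if x.dtype in (np.float32, np.complex64):
            x, recast = x.astype(np.complex128), np.complex64
        y = np.fft.fft2(x)        # transform in double precision
        if recast is not None:
            y = y.astype(recast)  # hand singles back to the caller
        return y


    y = fft2_doubles(np.random.rand(8, 8).astype(np.float32))
    assert y.dtype == np.complex64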
if im_f.ndim > filter_values.ndim: im_f *= filter_values else: im_f = filter_values * im_f - im = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f))) - im = np.real(im) + + im = fft.centered_ifft2(im_f) + im = xp.asnumpy(im.real) return self.__class__(im).stack_reshape(original_stack_shape) @@ -497,7 +498,7 @@ def _im_translate(self, shifts): shifts = shifts.astype(self.dtype) L = self.resolution - im_f = xp.asnumpy(fft.fft2(xp.asarray(im))) + im_f = xp.asnumpy(fft.fft2(xp.asarray(im))) # todo grid_shifted = fft.ifftshift( xp.asarray(np.ceil(np.arange(-L / 2, L / 2, dtype=self.dtype))) ) @@ -513,8 +514,8 @@ def _im_translate(self, shifts): ) mult_f = np.exp(-1j * phase_shifts) im_translated_f = im_f * mult_f - im_translated = xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f))) - im_translated = np.real(im_translated) + im_translated = fft.ifft2(xp.asarray(im_translated_f)) + im_translated = xp.asnumpy(im_translated.real) # Reshape to stack shape return self.__class__(im_translated).stack_reshape(stack_shape) diff --git a/src/aspire/volume/volume.py b/src/aspire/volume/volume.py index 7883f59190..b7e4245ede 100644 --- a/src/aspire/volume/volume.py +++ b/src/aspire/volume/volume.py @@ -342,13 +342,13 @@ def project(self, rot_matrices): if rot_matrices.ndim == 2: rot_matrices = np.expand_dims(rot_matrices, axis=0) - data = self._data + data = xp.asarray(self._data) n_rots = rot_matrices.shape[0] pts_rot = rotated_grids(self.resolution, rot_matrices) if n_rots == self.n_vols: # Apply rotations to Volumes element-wise. - im_f = np.empty( + im_f = xp.empty( (self.n_vols, self.resolution**2), dtype=complex_type(self.dtype) ) pts_rot = pts_rot.reshape((3, n_rots, self.resolution**2)) @@ -370,9 +370,9 @@ def project(self, rot_matrices): im_f[:, 0, :] = 0 im_f[:, :, 0] = 0 - im_f = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f))) + im_f = fft.centered_ifft2(im_f) - return aspire.image.Image(np.real(im_f)) + return aspire.image.Image(xp.asnumpy(im_f.real)) def to_vec(self): """Returns an N x resolution ** 3 array.""" From e58e47afbcff763400fbbc41a2f64832c963da96 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 24 Jun 2024 10:57:14 -0400 Subject: [PATCH 074/139] more image interop tweaks --- src/aspire/image/image.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index f4d89eb3a5..8ad8dc8711 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -495,15 +495,15 @@ def _im_translate(self, shifts): n_shifts == 1 or n_shifts == self.n_images ), "number of shifts must be 1 or match the number of images" # Cast shifts to this instance's internal dtype - shifts = shifts.astype(self.dtype) + shifts = xp.asarray(shifts, dtype=self.dtype) L = self.resolution - im_f = xp.asnumpy(fft.fft2(xp.asarray(im))) # todo + im_f = fft.fft2(xp.asarray(im)) grid_shifted = fft.ifftshift( - xp.asarray(np.ceil(np.arange(-L / 2, L / 2, dtype=self.dtype))) + xp.ceil(xp.arange(-L / 2, L / 2, dtype=self.dtype)) ) - grid_1d = xp.asnumpy(grid_shifted) * 2 * np.pi / L - om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing="ij") + grid_1d = grid_shifted * 2 * xp.pi / L + om_x, om_y = xp.meshgrid(grid_1d, grid_1d, indexing="ij") phase_shifts_x = -shifts[:, 0].reshape((n_shifts, 1, 1)) phase_shifts_y = -shifts[:, 1].reshape((n_shifts, 1, 1)) @@ -512,9 +512,9 @@ def _im_translate(self, shifts): om_x[np.newaxis, :, :] * phase_shifts_x + om_y[np.newaxis, :, :] * phase_shifts_y ) - mult_f = np.exp(-1j * phase_shifts) + mult_f = 
xp.exp(-1j * phase_shifts) im_translated_f = im_f * mult_f - im_translated = fft.ifft2(xp.asarray(im_translated_f)) + im_translated = fft.ifft2(im_translated_f) im_translated = xp.asnumpy(im_translated.real) # Reshape to stack shape From 0b877b5c43fe3108d31fb5fcfc8d71e97bf736be Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 24 Jun 2024 14:40:29 -0400 Subject: [PATCH 075/139] misc xp/numeric wrapper cleanup --- src/aspire/nufft/__init__.py | 12 ++++++------ src/aspire/numeric/cupy_fft.py | 12 ++++++++++-- src/aspire/numeric/numpy.py | 4 ++++ src/aspire/numeric/pyfftw_fft.py | 8 ++++---- src/aspire/numeric/scipy_fft.py | 8 ++++---- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index c2db4f2e8e..953f55ce3e 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -172,9 +172,9 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): """ - on_gpu = False + _on_gpu = False if cp and isinstance(sig_f, cp.ndarray): - on_gpu = True + _on_gpu = True if fourier_pts.dtype != real_type(sig_f.dtype): raise RuntimeError( @@ -198,7 +198,7 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): adjoint = adjoint.real if real else adjoint - if cp and not on_gpu: + if cp and not _on_gpu: adjoint = adjoint.get() return adjoint @@ -223,9 +223,9 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): """ - on_gpu = False + _on_gpu = False if cp and isinstance(sig_f, cp.ndarray): - on_gpu = True + _on_gpu = True if fourier_pts.dtype != real_type(sig_f.dtype): raise RuntimeError( @@ -259,7 +259,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): transform = transform.real if real else transform - if cp and not on_gpu: + if cp and not _on_gpu: transform = transform.get() return transform diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 043780c7a7..f67937813f 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -7,11 +7,16 @@ from aspire.numeric.base_fft import FFT +# This improves the flexibility of our FFT wrappers by allowing for +# incremental code changes and testing. def _preserve_host(func): """ - Method decorator that returns a numpy/cupy array result when passed a numpy/cupy array input. + Method decorator that returns a numpy/cupy array result when + passed a numpy/cupy array input respectively. - This improves the flexibility of our FFT wrappers by allowing for incremental code changes. + At the time of writing this wrapper will also upcast cupy FFT + operations to doubles as the precision in singles can cause + accuracy issues. """ @functools.wraps(func) # Pass metadata (eg name and doctrings) from `func` @@ -20,6 +25,9 @@ def wrapper(self, x, *args, **kwargs): # CuPy's single precision FFT appears to be too inaccurate for # many of our unit tests, so the signal is upcast and recast # on return. + # Todo, discuss with Joakim whether we want this upcasting + # business configurable or keep singles, both in conjunction + # with xfailing the tests. _singles = False if x.dtype == np.float32: _singles = True diff --git a/src/aspire/numeric/numpy.py b/src/aspire/numeric/numpy.py index 07627399f9..ddc8355816 100644 --- a/src/aspire/numeric/numpy.py +++ b/src/aspire/numeric/numpy.py @@ -8,8 +8,12 @@ class Numpy: + # This can be required when mixing nufft/fft/numpy backend combinations. @staticmethod def asnumpy(x): + """ + Ensure `asnumpy` is always available and returns a numpy array. 
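The translation above is the standard Fourier shift theorem: multiplying the transform by a per-frequency phase moves the image without interpolation. A self-contained demonstration with plain NumPy (textbook sign and axis conventions; `_im_translate` negates the shifts to match its own convention):

    import numpy as np

    L = 8
    img = np.zeros((L, L))
    img[2, 3] = 1.0  # a delta we can track
    d0, d1 = 1, 2    # shift along rows, columns

    grid = np.fft.ifftshift(np.ceil(np.arange(-L / 2, L / 2))) * 2 * np.pi / L
    om0, om1 = np.meshgrid(grid, grid, indexing="ij")
    phase = np.exp(-1j * (om0 * d0 + om1 * d1))

    shifted = np.fft.ifft2(np.fft.fft2(img) * phase).real
    assert abs(shifted[2 + d0, 3 + d1] - 1.0) < 1e-12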
+ """ if cp and isinstance(x, cp.ndarray): x = x.get() return x diff --git a/src/aspire/numeric/pyfftw_fft.py b/src/aspire/numeric/pyfftw_fft.py index afcad98d28..95a8ea80f7 100644 --- a/src/aspire/numeric/pyfftw_fft.py +++ b/src/aspire/numeric/pyfftw_fft.py @@ -160,8 +160,8 @@ def fftshift(self, a, axes=None): def ifftshift(self, a, axes=None): return scipy_fft.ifftshift(a, axes=axes) - def dct(self, *args, **kwargs): - return scipy_fft.dct(*args, **kwargs) + def dct(self, x, **kwargs): + return scipy_fft.dct(x, **kwargs) - def idct(self, *args, **kwargs): - return scipy_fft.idct(*args, **kwargs) + def idct(self, x, **kwargs): + return scipy_fft.idct(x, **kwargs) diff --git a/src/aspire/numeric/scipy_fft.py b/src/aspire/numeric/scipy_fft.py index d78e463803..3891d45671 100644 --- a/src/aspire/numeric/scipy_fft.py +++ b/src/aspire/numeric/scipy_fft.py @@ -34,8 +34,8 @@ def fftshift(self, x, axes=None): def ifftshift(self, x, axes=None): return sp.fft.ifftshift(x, axes=axes) - def dct(self, *args, **kwargs): - return sp.fft.dct(*args, **kwargs) + def dct(self, x, **kwargs): + return sp.fft.dct(x, **kwargs) - def idct(self, *args, **kwargs): - return sp.fft.idct(*args, **kwargs) + def idct(self, x, **kwargs): + return sp.fft.idct(x, **kwargs) From 5324e7f69547b27ff6dda6cb4ad317d4f7a9a06f Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 26 Jun 2024 14:20:20 -0400 Subject: [PATCH 076/139] precache fle x y grids on gpu --- src/aspire/basis/fle_2d.py | 22 ++++++++++------------ src/aspire/nufft/cufinufft.py | 14 +++++--------- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index e82dbb5d2f..2ca41a2994 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -308,12 +308,13 @@ def _compute_nufft_points(self): * np.arange(self.num_angular_nodes // 2, dtype=self.dtype) / self.num_angular_nodes ) - x = np.cos(phi).reshape(1, self.num_angular_nodes // 2) - y = np.sin(phi).reshape(1, self.num_angular_nodes // 2) - x = x * nodes * h - y = y * nodes * h - self.grid_x = x.flatten() - self.grid_y = y.flatten() + grid_xy = np.empty( + (2, self.num_radial_nodes, self.num_angular_nodes // 2), dtype=self.dtype + ) + grid_xy[0] = np.cos(phi) # x + grid_xy[1] = np.sin(phi) # y + grid_xy *= nodes * h + self.grid_xy = xp.asarray(grid_xy.reshape(2, -1)) def _build_interpolation_matrix(self): """ @@ -531,7 +532,7 @@ def _evaluate_t(self, imgs): def _step1_t(self, im): """ Step 1 of the adjoint transformation (images to coefficients). - Calculates the NUFFT of the image on gridpoints `self.grid_x` and `self.grid_y`. + Calculates the NUFFT of the image on gridpoints `grid_x` and `grid_y`. 
""" im = im.reshape(-1, self.nres, self.nres).astype(complex_type(self.dtype)) num_img = im.shape[0] @@ -539,10 +540,7 @@ def _step1_t(self, im): (num_img, self.num_radial_nodes, self.num_angular_nodes), dtype=complex_type(self.dtype), ) - _z = ( - nufft(im, np.stack((self.grid_x, self.grid_y)), epsilon=self.epsilon) - * self.h**2 - ) + _z = nufft(im, self.grid_xy, epsilon=self.epsilon) * self.h**2 _z = _z.reshape(num_img, self.num_radial_nodes, self.num_angular_nodes // 2) z[:, :, : self.num_angular_nodes // 2] = _z z[:, :, self.num_angular_nodes // 2 :] = np.conj(_z) @@ -645,7 +643,7 @@ def _step1(self, z): z = z[:, :, : self.num_angular_nodes // 2].reshape(num_img, -1) im = anufft( z.astype(complex_type(self.dtype)), - np.stack((self.grid_x, self.grid_y)), + self.grid_xy, (self.nres, self.nres), epsilon=self.epsilon, ) diff --git a/src/aspire/nufft/cufinufft.py b/src/aspire/nufft/cufinufft.py index 2dceb08b80..c1d15ff686 100644 --- a/src/aspire/nufft/cufinufft.py +++ b/src/aspire/nufft/cufinufft.py @@ -51,11 +51,11 @@ def __init__(self, sz, fourier_pts, epsilon=1e-8, ntransforms=1, **kwargs): "cufinufft has caught a non C_CONTIGUOUS array," " `fourier_pts` will be copied to C_CONTIGUOUS." ) - self.fourier_pts = np.ascontiguousarray( - np.mod(fourier_pts + np.pi, 2 * np.pi) - np.pi, dtype=self.dtype + self.fourier_pts = cp.ascontiguousarray( + cp.mod(cp.asarray(fourier_pts, dtype=self.dtype) + cp.pi, 2 * cp.pi) - cp.pi ) - self.num_pts = fourier_pts.shape[1] + self.num_pts = self.fourier_pts.shape[1] self.epsilon = max(epsilon, np.finfo(self.dtype).eps) self._transform_plan = cufPlan( @@ -81,12 +81,8 @@ def __init__(self, sz, fourier_pts, epsilon=1e-8, ntransforms=1, **kwargs): **self.adjoint_opts, ) - # Note, I store self.fourier_pts_gpu so the GPUArrray life - # is tied to instance, instead of this method. - self.fourier_pts_gpu = cp.array(self.fourier_pts) - - self._transform_plan.setpts(*self.fourier_pts_gpu) - self._adjoint_plan.setpts(*self.fourier_pts_gpu) + self._transform_plan.setpts(*self.fourier_pts) + self._adjoint_plan.setpts(*self.fourier_pts) def transform(self, signal): """ From afbc468c559f372a2fae3188a63ff5b26c8f0ed1 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 27 Jun 2024 15:32:28 -0400 Subject: [PATCH 077/139] Rm unneeded gc call --- src/aspire/basis/fle_2d.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 2ca41a2994..ba7d4636e9 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -1,4 +1,3 @@ -import gc import logging import numpy as np @@ -21,9 +20,8 @@ def _cleanup(): """ - Utility for informing python+cupy to cleanup memory held by old vars. + Utility for informing cupy to cleanup memory held by old vars. """ - gc.collect() try: import cupy @@ -511,18 +509,18 @@ def _evaluate_t(self, imgs): coefficients. """ # See Section 3.5 - imgs = xp.array(imgs) # Copy here, mutating. + imgs = xp.array(imgs) # Intentionally copying here, mutating. 
imgs[:, self.radial_mask] = 0 z = self._step1_t(imgs) - del imgs + del imgs # inform python we're done with imgs _cleanup() b = self._step2_t(z) - del z + del z # inform python we're done with z _cleanup() coefs = self._step3_t(b) - del b + del b # inform python we're done with b _cleanup() # return in FB order From 8526aa7abc3acf3fd4dd829e18b5efb4e5620d0b Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 28 Jun 2024 08:50:25 -0400 Subject: [PATCH 078/139] Add cupy GPU options to config tutorial --- gallery/tutorials/configuration.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/gallery/tutorials/configuration.py b/gallery/tutorials/configuration.py index 819ff9b675..75354bc422 100644 --- a/gallery/tutorials/configuration.py +++ b/gallery/tutorials/configuration.py @@ -102,6 +102,36 @@ time.sleep(1) print("Done Loop 2\n") +# %% +# Enabling GPU Acceleration +# ------------------------- +# Enabling GPU acceleration requires installing supporting software +# packages and small config changes. Installing the supporting +# software is most easily accomplished by installing ASPIRE with one +# of the published GPU extensions, for example ``pip install +# "aspire[dev,gpu_12x]"``. Once the packages are installed users +# should automatically find that the NUFFT calls are running on the +# GPU. Additional acceleration is achieved by enabling `cupy` for +# `numeric` and `fft` components. +# +# .. code-block:: yaml +# +# common: +# # numeric module to use - one of numpy/cupy +# numeric: cupy +# # fft backend to use - one of pyfftw/scipy/cupy/mkl +# fft: cupy +# +# Alternatively, like other config options, this can be changed +# dynamically with code. +# +# .. code-block:: python +# +# from aspire import config +# +# config["common"]["numeric"] = "cupy" +# config["common"]["fft"] = "cupy" +# # %% # Resolution From 2ef3cd3a8f5cf2b85c9a2516343adab1bd674339 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 28 Jun 2024 09:12:55 -0400 Subject: [PATCH 079/139] update GPU install docs --- docs/source/installation.rst | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 4a48e3a505..5fa608ecdf 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -129,10 +129,10 @@ an M1 laptop: Installing GPU Extensions ************************* -ASPIRE does support GPUs, depending on several external packages. The -collection of GPU extensions can be installed using ``pip``. -Extensions are grouped based on CUDA versions. To find the CUDA -driver version, run ``nvidia-smi`` on the intended system. +ASPIRE does support using a GPU, depending on several external +packages. The collection of GPU extensions can be installed using +``pip``. Extensions are grouped based on CUDA versions. To find the +CUDA driver version, run ``nvidia-smi`` on the intended system. .. list-table:: CUDA GPU Extension Versions :widths: 25 25 @@ -140,14 +140,6 @@ driver version, run ``nvidia-smi`` on the intended system. * - CUDA Version - ASPIRE Extension - * - 10.2 - - gpu-102 - * - 11.0 - - gpu-110 - * - 11.1 - - gpu-111 - * - >=11.2 - - gpu-11x * - >=12 - gpu-12x @@ -164,12 +156,15 @@ the command below would install GPU packages required for ASPIRE. By default if the required GPU extensions are correctly installed, -ASPIRE should automatically begin using the GPU for select components -(such as those using ``nufft``). 
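Before flipping `numeric` and `fft` to cupy as described above, it is worth confirming that the cupy installation can actually see a device; a broken CUDA setup otherwise surfaces as confusing errors deep inside the pipeline. A quick sanity check, independent of ASPIRE:

    try:
        import cupy as cp

        print("CUDA devices visible:", cp.cuda.runtime.getDeviceCount())
    except Exception as exc:  # cupy missing or CUDA runtime unusable
        print("cupy not usable:", exc)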
- -Because GPU extensions depend on several third party packages and -libraries, we can only offer limited support if one of the packages -has a problem on your system. +ASPIRE should automatically begin using the GPU calls to our ``nufft`` module. + +Using GPU in other areas of the code is still an experimental feature +and requires a minor configuration setting to enable ``cupy``. See the +:ref:`sphx_glr_auto_tutorials_configuration.py` for details. Because +GPU extensions depend on several third party softwares and machines +vary wildly, we can only offer limited support if one of the packages +has a problem on your system. We are currently expanding GPU code +coverage. Generating Documentation ************************ From 35797878fb24e25f2a0396e3d4b094b5c880c417 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 28 Jun 2024 10:14:57 -0400 Subject: [PATCH 080/139] improve crop 3d xp interop --- src/aspire/utils/coor_trans.py | 63 ++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 18 deletions(-) diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index ef7857c994..457e29f9f8 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -376,7 +376,7 @@ def crop_pad_2d(im, size, fill_value=0): :param im: A >=2-dimensional numpy array :param size: Integer size of cropped/padded output - :return: A numpy array of shape (..., size, size) + :return: Array of shape (..., size, size) """ im_y, im_x = im.shape[-2:] @@ -393,7 +393,7 @@ def crop_pad_2d(im, size, fill_value=0): shape = list(im.shape[:-2]) shape.extend([size, size]) - # Ensure that we return in the same dtype as the input + # Ensure that we return the same dtype as the input _full = np.full # Default to numpy array if isinstance(im, xp.ndarray): # Use cupy when `im` _and_ xp are cupy ndarray @@ -405,34 +405,61 @@ def crop_pad_2d(im, size, fill_value=0): # when padding, start_x and start_y are negative since size is larger # than im_x and im_y; the below line calculates where the original image # is placed in relation to the (now-larger) box size - to_return[-start_y : im_y - start_y, -start_x : im_x - start_x] = im + to_return[..., -start_y : im_y - start_y, -start_x : im_x - start_x] = im return to_return else: # target size is between mat_x and mat_y raise ValueError("Cannot crop and pad an image at the same time.") -def crop_pad_3d(im, size, fill_value=0): - im_y, im_x, im_z = im.shape +def crop_pad_3d(vol, size, fill_value=0): + """ + Crop/pads `vol` according to `size`. + + Padding will use `fill_value`. + Return's host/gpu array based on `vol`. 
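After this change `crop_pad_3d` mirrors the 2D helper: leading stack dimensions pass through untouched, padding uses `fill_value`, and the output lives wherever the input lives (host or device). A short usage sketch (requires ASPIRE; the shapes follow directly from the code above):

    import numpy as np

    from aspire.utils.coor_trans import crop_pad_3d

    vol = np.random.rand(8, 8, 8).astype(np.float32)

    assert crop_pad_3d(vol, 6).shape == (6, 6, 6)                    # crop
    assert crop_pad_3d(vol, 10, fill_value=0).shape == (10, 10, 10)  # pad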
+ + :param vol: A >=3-dimensional numpy array + :param size: Integer size of cropped/padded output + :return: Array of shape (..., size, size, size) + """ + + vol_z, vol_y, vol_x = vol.shape[-3:] # shift terms - start_x = math.floor(im_x / 2) - math.floor(size / 2) - start_y = math.floor(im_y / 2) - math.floor(size / 2) - start_z = math.floor(im_z / 2) - math.floor(size / 2) + start_z = math.floor(vol_z / 2) - math.floor(size / 2) + start_y = math.floor(vol_y / 2) - math.floor(size / 2) + start_x = math.floor(vol_x / 2) - math.floor(size / 2) # cropping - if size <= min(im_y, im_x, im_z): - return im[ - start_y : start_y + size, start_x : start_x + size, start_z : start_z + size + if size <= min(vol_z, vol_y, vol_x): + return vol[ + ..., + start_z : start_z + size, + start_y : start_y + size, + start_x : start_x + size, ] # padding - elif size >= max(im_y, im_x, im_z): - to_return = fill_value * np.ones((size, size, size), dtype=im.dtype) + elif size >= max(vol_z, vol_y, vol_x): + # Determine shape + shape = list(vol.shape[:-3]) + shape.extend([size, size, size]) + + # Ensure that we return the same dtype as the input + _full = np.full # Default to numpy array + if isinstance(vol, xp.ndarray): + # Use cupy when `vol` _and_ xp are cupy ndarray + # Avoids having to handle when cupy is not installed + _full = xp.full + + to_return = _full(shape, fill_value, dtype=vol.dtype) + to_return[ - -start_y : im_y - start_y, - -start_x : im_x - start_x, - -start_z : im_z - start_z, - ] = im + ..., + -start_z : vol_z - start_z, + -start_y : vol_y - start_y, + -start_x : vol_x - start_x, + ] = vol return to_return else: - # target size is between min and max of (im_y, im_x, im_z) + # target size is between min and max of (vol_y, vol_x, vol_z) raise ValueError("Cannot crop and pad a volume at the same time.") From f35ad5267932abab323dd7dd285f4896b6eb9d9f Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 11:13:04 -0400 Subject: [PATCH 081/139] ffb2d self review cleanup --- src/aspire/basis/ffb_2d.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/aspire/basis/ffb_2d.py b/src/aspire/basis/ffb_2d.py index 116e009757..8d46e8419c 100644 --- a/src/aspire/basis/ffb_2d.py +++ b/src/aspire/basis/ffb_2d.py @@ -131,16 +131,16 @@ def _evaluate(self, v): ind = 0 - idx = ind + xp.arange(self.k_max[0], dtype=int) + idx = ind + np.arange(self.k_max[0], dtype=int) - pf[:, 0, :] = v[:, xp.asarray(self._zero_angular_inds)] @ self.radial_norm[idx] + pf[:, 0, :] = v[:, self._zero_angular_inds] @ self.radial_norm[idx] ind = ind + idx.size ind_pos = ind for ell in range(1, self.ell_max + 1): idx = ind + xp.arange(self.k_max[ell], dtype=int) - idx_pos = ind_pos + xp.arange(self.k_max[ell], dtype=int) + idx_pos = ind_pos + np.arange(self.k_max[ell], dtype=int) idx_neg = idx_pos + self.k_max[ell] v_ell = (v[:, idx_pos] - 1j * v[:, idx_neg]) / 2.0 @@ -171,9 +171,7 @@ def _evaluate(self, v): # perform inverse non-uniformly FFT transform back to 2D coordinate basis freqs = m_reshape(self._precomp["freqs"], (2, n_r * n_theta)) - x = 2 * anufft( - pf.astype(complex_type(self.dtype)), 2 * pi * freqs, self.sz, real=True - ) + x = 2 * anufft(pf, 2 * pi * freqs, self.sz, real=True) # Return X as Image instance with the last two dimensions as *self.sz x = x.reshape((*sz_roll, *self.sz)) @@ -206,7 +204,7 @@ def _evaluate_t(self, x): pf = xp.concatenate((pf, pf.conjugate()), axis=2) # evaluate radial integral using the Gauss-Legendre quadrature rule - pf *= self.gl_weighted_nodes[None, :, 
None] + pf = pf * self.gl_weighted_nodes[None, :, None] # 1D FFT on the angular dimension for each concentric circle pf = 2 * xp.pi / (2 * n_theta) * fft.fft(pf) From e44840240ce7ce665c7d22adc7b8357ac1dd557c Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 14:43:57 -0400 Subject: [PATCH 082/139] ffb3d move more grid precomp to gpu --- src/aspire/basis/ffb_3d.py | 76 +++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/src/aspire/basis/ffb_3d.py b/src/aspire/basis/ffb_3d.py index 5740900a34..7a2509382f 100644 --- a/src/aspire/basis/ffb_3d.py +++ b/src/aspire/basis/ffb_3d.py @@ -1,7 +1,6 @@ import logging import numpy as np -from numpy import pi from aspire.basis import FBBasis3D from aspire.basis.basis_utils import lgwt, norm_assoc_legendre, sph_bessel @@ -61,26 +60,29 @@ def _precomp(self): r, wt_r = lgwt(n_r, 0.0, self.kcut, dtype=self.dtype) z, wt_z = lgwt(n_phi, -1, 1, dtype=self.dtype) - r = m_reshape(r, (n_r, 1)) - wt_r = m_reshape(wt_r, (n_r, 1)) - z = m_reshape(z, (n_phi, 1)) - wt_z = m_reshape(wt_z, (n_phi, 1)) - phi = np.arccos(z) + r = m_reshape(xp.asarray(r), (n_r, 1)) + rh = xp.asnumpy(r) + wt_r = m_reshape(xp.asarray(wt_r), (n_r, 1)) + z = m_reshape(xp.asarray(z), (n_phi, 1)) + wt_z = m_reshape(xp.asarray(wt_z), (n_phi, 1)) + phi = xp.arccos(z) wt_phi = wt_z - theta = 2 * pi * np.arange(n_theta, dtype=self.dtype).T / (2 * n_theta) + theta = 2 * xp.pi * xp.arange(n_theta, dtype=self.dtype).T / (2 * n_theta) theta = m_reshape(theta, (n_theta, 1)) # evaluate basis function in the radial dimension - radial_wtd = np.zeros( + radial_wtd = xp.zeros( shape=(n_r, np.max(self.k_max), self.ell_max + 1), dtype=self.dtype ) for ell in range(0, self.ell_max + 1): k_max_ell = self.k_max[ell] - rmat = r * self.r0[ell][0:k_max_ell].T / self.kcut - radial_ell = np.zeros_like(rmat) + rmat = rh * self.r0[ell][0:k_max_ell].T / self.kcut # host + radial_ell = xp.zeros_like(rmat) for ik in range(0, k_max_ell): - radial_ell[:, ik] = sph_bessel(ell, rmat[:, ik]) - nrm = np.abs(sph_bessel(ell + 1, self.r0[ell][0:k_max_ell].T) / 4) + radial_ell[:, ik] = xp.asarray(sph_bessel(ell, rmat[:, ik])) + nrm = xp.abs( + xp.asarray(sph_bessel(ell + 1, self.r0[ell][0:k_max_ell].T)) / 4 + ) radial_ell = radial_ell / nrm radial_ell_wtd = r**2 * wt_r * radial_ell radial_wtd[:, 0:k_max_ell, ell] = radial_ell_wtd @@ -95,14 +97,14 @@ def _precomp(self): - np.mod(self.ell_max, 2) * np.mod(m, 2) ) n_odd_ell = int(self.ell_max - m + 1 - n_even_ell) - phi_wtd_m_even = np.zeros((n_phi, n_even_ell), dtype=phi.dtype) - phi_wtd_m_odd = np.zeros((n_phi, n_odd_ell), dtype=phi.dtype) + phi_wtd_m_even = xp.zeros((n_phi, n_even_ell), dtype=phi.dtype) + phi_wtd_m_odd = xp.zeros((n_phi, n_odd_ell), dtype=phi.dtype) ind_even = 0 ind_odd = 0 for ell in range(m, self.ell_max + 1): - phi_m_ell = norm_assoc_legendre(ell, m, z) - nrm_inv = np.sqrt(0.5 / pi) + phi_m_ell = xp.asarray(norm_assoc_legendre(ell, m, z)) + nrm_inv = np.sqrt(0.5 / np.pi) phi_m_ell = nrm_inv * phi_m_ell phi_wtd_m_ell = wt_phi * phi_m_ell if np.mod(ell, 2) == 0: @@ -116,41 +118,41 @@ def _precomp(self): ang_phi_wtd_odd.append(phi_wtd_m_odd) # evaluate basis function in the theta dimension - ang_theta = np.zeros((n_theta, 2 * self.ell_max + 1), dtype=theta.dtype) + ang_theta = xp.zeros((n_theta, 2 * self.ell_max + 1), dtype=theta.dtype) - ang_theta[:, 0 : self.ell_max] = np.sqrt(2) * np.sin( - theta @ m_reshape(np.arange(self.ell_max, 0, -1), (1, self.ell_max)) + ang_theta[:, 0 : self.ell_max] = 
np.sqrt(2) * xp.sin( + theta @ m_reshape(xp.arange(self.ell_max, 0, -1), (1, self.ell_max)) ) - ang_theta[:, self.ell_max] = np.ones(n_theta, dtype=theta.dtype) - ang_theta[:, self.ell_max + 1 : 2 * self.ell_max + 1] = np.sqrt(2) * np.cos( - theta @ m_reshape(np.arange(1, self.ell_max + 1), (1, self.ell_max)) + ang_theta[:, self.ell_max] = xp.ones(n_theta, dtype=theta.dtype) + ang_theta[:, self.ell_max + 1 : 2 * self.ell_max + 1] = np.sqrt(2) * xp.cos( + theta @ m_reshape(xp.arange(1, self.ell_max + 1), (1, self.ell_max)) ) - ang_theta_wtd = (2 * pi / n_theta) * ang_theta + ang_theta_wtd = (2 * np.pi / n_theta) * ang_theta - theta_grid, phi_grid, r_grid = np.meshgrid( - theta, phi, r, sparse=False, indexing="ij" + theta_grid, phi_grid, r_grid = xp.meshgrid( + theta.flatten(), phi.flatten(), r.flatten(), sparse=False, indexing="ij" ) - fourier_x = m_flatten(r_grid * np.cos(theta_grid) * np.sin(phi_grid)) - fourier_y = m_flatten(r_grid * np.sin(theta_grid) * np.sin(phi_grid)) - fourier_z = m_flatten(r_grid * np.cos(phi_grid)) + fourier_x = m_flatten(r_grid * xp.cos(theta_grid) * xp.sin(phi_grid)) + fourier_y = m_flatten(r_grid * xp.sin(theta_grid) * xp.sin(phi_grid)) + fourier_z = m_flatten(r_grid * xp.cos(phi_grid)) fourier_pts = ( 2 - * pi - * np.vstack( + * xp.pi + * xp.vstack( ( - fourier_z[np.newaxis, ...], - fourier_y[np.newaxis, ...], - fourier_x[np.newaxis, ...], + fourier_z[xp.newaxis, ...], + fourier_y[xp.newaxis, ...], + fourier_x[xp.newaxis, ...], ) ) ) return { - "radial_wtd": xp.asarray(radial_wtd), - "ang_phi_wtd_even": [xp.asarray(x) for x in ang_phi_wtd_even], - "ang_phi_wtd_odd": [xp.asarray(x) for x in ang_phi_wtd_odd], - "ang_theta_wtd": xp.asarray(ang_theta_wtd), + "radial_wtd": radial_wtd, + "ang_phi_wtd_even": ang_phi_wtd_even, + "ang_phi_wtd_odd": ang_phi_wtd_odd, + "ang_theta_wtd": ang_theta_wtd, "fourier_pts": fourier_pts, } From 286301e4d1672668cea68e8bf9fc62d9efd29c0c Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 14:58:08 -0400 Subject: [PATCH 083/139] Move more FLE2D grid precomp to GPU --- src/aspire/basis/fle_2d.py | 23 +++++++++++++---------- src/aspire/basis/fle_2d_utils.py | 1 - 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index ba7d4636e9..631161d0fe 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -21,6 +21,9 @@ def _cleanup(): """ Utility for informing cupy to cleanup memory held by old vars. + + This method is designed to be safely called even when `CuPy` is + not installed, in which case it is a no-op. 
""" try: import cupy @@ -288,10 +291,10 @@ def _compute_nufft_points(self): self.num_angular_nodes = num_angular_nodes # create gridpoints - nodes = 1 - (2 * np.arange(self.num_radial_nodes, dtype=self.dtype) + 1) / ( + nodes = 1 - (2 * xp.arange(self.num_radial_nodes, dtype=self.dtype) + 1) / ( 2 * self.num_radial_nodes ) - nodes = (np.cos(np.pi * nodes) + 1) / 2 + nodes = (xp.cos(np.pi * nodes) + 1) / 2 nodes = ( self.greatest_lambda - self.smallest_lambda ) * nodes + self.smallest_lambda @@ -302,17 +305,17 @@ def _compute_nufft_points(self): phi = ( 2 - * np.pi - * np.arange(self.num_angular_nodes // 2, dtype=self.dtype) + * xp.pi + * xp.arange(self.num_angular_nodes // 2, dtype=self.dtype) / self.num_angular_nodes ) - grid_xy = np.empty( + grid_xy = xp.empty( (2, self.num_radial_nodes, self.num_angular_nodes // 2), dtype=self.dtype ) - grid_xy[0] = np.cos(phi) # x - grid_xy[1] = np.sin(phi) # y - grid_xy *= nodes * h - self.grid_xy = xp.asarray(grid_xy.reshape(2, -1)) + grid_xy[0] = xp.cos(phi) # x + grid_xy[1] = xp.sin(phi) # y + grid_xy = grid_xy * nodes * h + self.grid_xy = grid_xy.reshape(2, -1) def _build_interpolation_matrix(self): """ @@ -530,7 +533,7 @@ def _evaluate_t(self, imgs): def _step1_t(self, im): """ Step 1 of the adjoint transformation (images to coefficients). - Calculates the NUFFT of the image on gridpoints `grid_x` and `grid_y`. + Calculates the NUFFT of the image on gridpoints `grid_xy`. """ im = im.reshape(-1, self.nres, self.nres).astype(complex_type(self.dtype)) num_img = im.shape[0] diff --git a/src/aspire/basis/fle_2d_utils.py b/src/aspire/basis/fle_2d_utils.py index 33f237165e..ea459988b0 100644 --- a/src/aspire/basis/fle_2d_utils.py +++ b/src/aspire/basis/fle_2d_utils.py @@ -195,7 +195,6 @@ def barycentric_interp_sparse(target_points, known_points, numsparse): # note that const cancels in numerator and denominator vals = vals / denom.reshape(-1, 1) - # TODO, migrate more of this method towards `xp` vals = xp.array(vals.flatten()) idx = xp.array(idx.flatten()) jdx = xp.array(jdx.flatten()) From db0e7d651503e37fbc4165ac9118ddfa90c851f2 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 15:05:58 -0400 Subject: [PATCH 084/139] image self review cleanup --- src/aspire/image/image.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 8ad8dc8711..5a7bde2374 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -392,6 +392,10 @@ def downsample(self, ds_res): original_stack_shape = self.stack_shape im = self.stack_reshape(-1) + # Note image data is intentionally migrated via `xp.asarray` + # because all of the subsequent calls until `asnumpy` are GPU + # when xp and fft in `cupy` mode. + # compute FT with centered 0-frequency fx = fft.centered_fft2(xp.asarray(im._data)) # crop 2D Fourier transform for each image @@ -413,17 +417,16 @@ def filter(self, filter): im = self.stack_reshape(-1) + # Note image and filter data is intentionally migrated via + # `xp.asarray` because all of the subsequent calls until + # `asnumpy` are GPU when xp and fft in `cupy` mode. filter_values = xp.asarray(filter.evaluate_grid(self.resolution)) + # Convolve im_f = fft.centered_fft2(xp.asarray(im._data)) - - # TODO: why are these different? Doesn't the broadcast work? 
-        if im_f.ndim > filter_values.ndim:
-            im_f *= filter_values
-        else:
-            im_f = filter_values * im_f
-
+        im_f = filter_values * im_f
         im = fft.centered_ifft2(im_f)
+
         im = xp.asnumpy(im.real)

         return self.__class__(im).stack_reshape(original_stack_shape)


From 235979c0c04cb2423ced2498e698bf3bc6ccb486 Mon Sep 17 00:00:00 2001
From: Garrett Wright
Date: Mon, 1 Jul 2024 15:10:56 -0400
Subject: [PATCH 085/139] var name improvement

---
 src/aspire/nufft/__init__.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py
index 953f55ce3e..fcfe182918 100644
--- a/src/aspire/nufft/__init__.py
+++ b/src/aspire/nufft/__init__.py
@@ -172,9 +172,9 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8):

     """

-    _on_gpu = False
+    _keep_on_gpu = False
     if cp and isinstance(sig_f, cp.ndarray):
-        _on_gpu = True
+        _keep_on_gpu = True

     if fourier_pts.dtype != real_type(sig_f.dtype):
         raise RuntimeError(
@@ -198,7 +198,7 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8):

     adjoint = adjoint.real if real else adjoint

-    if cp and not _on_gpu:
+    if cp and not _keep_on_gpu:
         adjoint = adjoint.get()

     return adjoint
@@ -223,9 +223,9 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8):

     """

-    _on_gpu = False
+    _keep_on_gpu = False
     if cp and isinstance(sig_f, cp.ndarray):
-        _on_gpu = True
+        _keep_on_gpu = True

     if fourier_pts.dtype != real_type(sig_f.dtype):
         raise RuntimeError(
@@ -259,7 +259,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8):

     transform = transform.real if real else transform

-    if cp and not _on_gpu:
+    if cp and not _keep_on_gpu:
         transform = transform.get()

     return transform

From 4f6ca0aee96ba970031c5f85b83ca80823edf7d9 Mon Sep 17 00:00:00 2001
From: Garrett Wright
Date: Mon, 1 Jul 2024 15:16:42 -0400
Subject: [PATCH 086/139] minor crop pad string improvements

---
 src/aspire/utils/coor_trans.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py
index 457e29f9f8..53f86714c8 100644
--- a/src/aspire/utils/coor_trans.py
+++ b/src/aspire/utils/coor_trans.py
@@ -409,7 +409,11 @@ def crop_pad_2d(im, size, fill_value=0):
         return to_return
     else:
         # target size is between mat_x and mat_y
-        raise ValueError("Cannot crop and pad an image at the same time.")
+        raise ValueError(
+            "Cannot crop and pad Image at the same time. "
+            "If this is really what you intended,"
+            " make two separate calls for cropping and padding."
+        )


 def crop_pad_3d(vol, size, fill_value=0):
@@ -461,5 +465,9 @@ def crop_pad_3d(vol, size, fill_value=0):
         ] = vol
         return to_return
     else:
-        # target size is between min and max of (vol_y, vol_x, vol_z)
-        raise ValueError("Cannot crop and pad a volume at the same time.")
+        # target size is between min and max of (vol_x, vol_y, vol_z)
+        raise ValueError(
+            "Cannot crop and pad Volume at the same time. "
+            "If this is really what you intended,"
+            " make two separate calls for cropping and padding."
+ ) From 6fa1ec609ffb3d81aeaecfa826fe71b8a1d5d024 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 15:23:54 -0400 Subject: [PATCH 087/139] Update volume downsample with crop_pad_3d improvements --- src/aspire/volume/volume.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/aspire/volume/volume.py b/src/aspire/volume/volume.py index b7e4245ede..0f01ef5e61 100644 --- a/src/aspire/volume/volume.py +++ b/src/aspire/volume/volume.py @@ -468,27 +468,27 @@ def downsample(self, ds_res, mask=None): :param ds_res: Desired resolution. :param mask: Optional NumPy array mask to multiply in Fourier space. """ - if mask is None: - mask = 1.0 original_stack_shape = self.stack_shape v = self.stack_reshape(-1) # take 3D Fourier transform of each volume in the stack - fx = xp.asnumpy(fft.fftshift(fft.fftn(xp.asarray(v._data), axes=(1, 2, 3)))) + fx = fft.fftshift(fft.fftn(xp.asarray(v._data), axes=(1, 2, 3))) + # crop each volume to the desired resolution in frequency space - crop_fx = ( - np.array([crop_pad_3d(fx[i, :, :, :], ds_res) for i in range(self.n_vols)]) - * mask - ) + fx = crop_pad_3d(fx, ds_res) + + # Optionally apply mask + if mask is not None: + fx = fx * xp.asarray(mask) + # inverse Fourier transform of each volume - out = xp.asnumpy( - fft.ifftn(fft.ifftshift(xp.asarray(crop_fx)), axes=(1, 2, 3)) - * (ds_res**3 / self.resolution**3) - ) + out = fft.ifftn(fft.ifftshift(fx), axes=(1, 2, 3)).real + out = out.real * (ds_res**3 / self.resolution**3) + # returns a new Volume object return self.__class__( - np.real(out), symmetry_group=self.symmetry_group + xp.asnumpy(out), symmetry_group=self.symmetry_group ).stack_reshape(original_stack_shape) def shift(self): From 2431495564142840b71672a0116624e57fc05dfe Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 15:25:59 -0400 Subject: [PATCH 088/139] add docstring --- tests/test_numeric_sparse.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_numeric_sparse.py b/tests/test_numeric_sparse.py index 5a8227fe47..e58aa02e6a 100644 --- a/tests/test_numeric_sparse.py +++ b/tests/test_numeric_sparse.py @@ -1,3 +1,7 @@ +""" +Tests basic numpy/cupy functionality of sparse numeric wrappers. +""" + import numpy as np import pytest From 8db122147e8b9f4c8fab59c09469f57f62654737 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 16:02:58 -0400 Subject: [PATCH 089/139] enforce filter dtype --- src/aspire/image/image.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 5a7bde2374..d25ee4baa0 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -420,7 +420,9 @@ def filter(self, filter): # Note image and filter data is intentionally migrated via # `xp.asarray` because all of the subsequent calls until # `asnumpy` are GPU when xp and fft in `cupy` mode. 
- filter_values = xp.asarray(filter.evaluate_grid(self.resolution)) + filter_values = xp.asarray( + filter.evaluate_grid(self.resolution), dtype=self.dtype + ) # Convolve im_f = fft.centered_fft2(xp.asarray(im._data)) From 329de8f0cf36567fd7156b0d733a2b28b32f5cc1 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 1 Jul 2024 16:18:53 -0400 Subject: [PATCH 090/139] explicitly force C order before cufinufft call --- src/aspire/nufft/cufinufft.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/aspire/nufft/cufinufft.py b/src/aspire/nufft/cufinufft.py index c1d15ff686..218fbd5fb7 100644 --- a/src/aspire/nufft/cufinufft.py +++ b/src/aspire/nufft/cufinufft.py @@ -165,7 +165,8 @@ def adjoint(self, signal): " In the future this will be an error." ) - signal = cp.asarray(signal, dtype=self.complex_dtype) + # Note, if not C order, cuFINUFFT will copy-cast anyway. + signal = cp.asarray(signal, order="C", dtype=self.complex_dtype) res_shape = self.sz # Note, there is a corner case for ntransforms == 1. From da18c563f5da14a2cdb3692fa7c354e450a24f22 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 2 Jul 2024 10:56:38 -0400 Subject: [PATCH 091/139] Add dtype note and utest tolerance for singles --- src/aspire/image/image.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index d25ee4baa0..f03372b087 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -420,6 +420,8 @@ def filter(self, filter): # Note image and filter data is intentionally migrated via # `xp.asarray` because all of the subsequent calls until # `asnumpy` are GPU when xp and fft in `cupy` mode. + # + # Second note, filter dtype may not match image dtype. filter_values = xp.asarray( filter.evaluate_grid(self.resolution), dtype=self.dtype ) From 7e2bdb327425201f44b42448bf50c94844f4452e Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 15 Jul 2024 10:35:56 -0400 Subject: [PATCH 092/139] configuration doc wording (strings) --- gallery/tutorials/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gallery/tutorials/configuration.py b/gallery/tutorials/configuration.py index 75354bc422..372d97df06 100644 --- a/gallery/tutorials/configuration.py +++ b/gallery/tutorials/configuration.py @@ -110,7 +110,7 @@ # software is most easily accomplished by installing ASPIRE with one # of the published GPU extensions, for example ``pip install # "aspire[dev,gpu_12x]"``. Once the packages are installed users -# should automatically find that the NUFFT calls are running on the +# should find that the NUFFT calls are automatically running on the # GPU. Additional acceleration is achieved by enabling `cupy` for # `numeric` and `fft` components. 
# From a7fa3f3cafeff0956f59d1c29747717c16d81d92 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 15 Jul 2024 11:04:40 -0400 Subject: [PATCH 093/139] keep a few more vars as cupy --- src/aspire/basis/fle_2d.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/aspire/basis/fle_2d.py b/src/aspire/basis/fle_2d.py index 631161d0fe..76330e6fba 100644 --- a/src/aspire/basis/fle_2d.py +++ b/src/aspire/basis/fle_2d.py @@ -544,7 +544,7 @@ def _step1_t(self, im): _z = nufft(im, self.grid_xy, epsilon=self.epsilon) * self.h**2 _z = _z.reshape(num_img, self.num_radial_nodes, self.num_angular_nodes // 2) z[:, :, : self.num_angular_nodes // 2] = _z - z[:, :, self.num_angular_nodes // 2 :] = np.conj(_z) + z[:, :, self.num_angular_nodes // 2 :] = _z.conj() return z def _step2_t(self, z): @@ -643,13 +643,13 @@ def _step1(self, z): num_img = z.shape[0] z = z[:, :, : self.num_angular_nodes // 2].reshape(num_img, -1) im = anufft( - z.astype(complex_type(self.dtype)), + z.astype(complex_type(self.dtype), copy=False), self.grid_xy, (self.nres, self.nres), epsilon=self.epsilon, ) - im = im + np.conj(im) - im = np.real(im) + im = im + im.conj() + im = im.real im = im.reshape(num_img, self.nres, self.nres) im[:, self.radial_mask] = 0 From 9ac4be9709a944c43286d86689b9a72b47b4d796 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Mon, 15 Jul 2024 15:09:41 -0400 Subject: [PATCH 094/139] replace xp.newaxis with None --- src/aspire/basis/ffb_3d.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aspire/basis/ffb_3d.py b/src/aspire/basis/ffb_3d.py index 7a2509382f..4137e572a9 100644 --- a/src/aspire/basis/ffb_3d.py +++ b/src/aspire/basis/ffb_3d.py @@ -141,9 +141,9 @@ def _precomp(self): * xp.pi * xp.vstack( ( - fourier_z[xp.newaxis, ...], - fourier_y[xp.newaxis, ...], - fourier_x[xp.newaxis, ...], + fourier_z[None, ...], + fourier_y[None, ...], + fourier_x[None, ...], ) ) ) From 5ab1c7bc8ea9fbcf6240d0e5bb02de96069449ac Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 23 Jul 2024 08:40:19 -0400 Subject: [PATCH 095/139] put cache dir on new line --- .github/workflows/workflow.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index fce5a7f6d4..c41b221ec4 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -147,7 +147,8 @@ jobs: echo "Stash the WORK_DIR to GitHub env so we can clean it up later." 
echo "WORK_DIR=${WORK_DIR}" >> $GITHUB_ENV echo -e "ray:\n temp_dir: ${WORK_DIR}\n" > ${WORK_DIR}/config.yaml - echo -e "common:\n cache_dir: ${CI_CACHE_DIR}" >> ${WORK_DIR}/config.yaml + echo -e "common:" >> ${WORK_DIR}/config.yaml + echo -e " cache_dir: ${CI_CACHE_DIR}" >> ${WORK_DIR}/config.yaml echo -e " numeric: cupy" >> ${WORK_DIR}/config.yaml echo -e " fft: cupy\n" >> ${WORK_DIR}/config.yaml echo "Log the config: ${WORK_DIR}/config.yaml" From b86c2d561e493aca8f2214ca60d4bea83300650c Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 23 Jul 2024 08:52:57 -0400 Subject: [PATCH 096/139] rename tmp to ang_theta_wtd_trans --- src/aspire/basis/ffb_3d.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aspire/basis/ffb_3d.py b/src/aspire/basis/ffb_3d.py index 4137e572a9..7f0821b99a 100644 --- a/src/aspire/basis/ffb_3d.py +++ b/src/aspire/basis/ffb_3d.py @@ -308,9 +308,9 @@ def _evaluate_t(self, x): pf = m_reshape(pf.T, (n_theta, n_phi * n_r * n_data)) # evaluate the theta parts - tmp = self._precomp["ang_theta_wtd"].T - u_even = tmp @ pf.real - u_odd = tmp @ pf.imag + ang_theta_wtd_trans = self._precomp["ang_theta_wtd"].T + u_even = ang_theta_wtd_trans @ pf.real + u_odd = ang_theta_wtd_trans @ pf.imag u_even = m_reshape(u_even, (2 * self.ell_max + 1, n_phi, n_r, n_data)) u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi, n_r, n_data)) From b9f263b0c6dd4aa537fe6f185fe967cc3b03947d Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Tue, 23 Jul 2024 08:57:42 -0400 Subject: [PATCH 097/139] gpu to GPU and rm dev comment --- src/aspire/nufft/__init__.py | 4 ++-- src/aspire/numeric/cupy_fft.py | 3 --- src/aspire/utils/coor_trans.py | 4 ++-- tests/test_orient_sdp.py | 2 +- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index fcfe182918..07d92c736c 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -159,7 +159,7 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): Selects best available package from `nfft` `backends` configuration list. - When sig_f is provided as a CuPy gpu array with a cufinufft + When sig_f is provided as a CuPy GPU array with a cufinufft backend, result is maintained on GPU. :param sig_f: Array representing the signal(s) in Fourier space to be transformed. \ @@ -211,7 +211,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): Selects best available package from `nfft` `backends` configuration list. - When sig_f is provided as a CuPy gpu array with a cufinufft + When sig_f is provided as a CuPy GPU array with a cufinufft backend, result is maintained on GPU. :param sig_f: Array representing the signal(s) in real space to be transformed. \ diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index f67937813f..6ad6a4e9da 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -25,9 +25,6 @@ def wrapper(self, x, *args, **kwargs): # CuPy's single precision FFT appears to be too inaccurate for # many of our unit tests, so the signal is upcast and recast # on return. - # Todo, discuss with Joakim whether we want this upcasting - # business configurable or keep singles, both in conjunction - # with xfailing the tests. 
_singles = False if x.dtype == np.float32: _singles = True diff --git a/src/aspire/utils/coor_trans.py b/src/aspire/utils/coor_trans.py index 53f86714c8..cad8fb0295 100644 --- a/src/aspire/utils/coor_trans.py +++ b/src/aspire/utils/coor_trans.py @@ -372,7 +372,7 @@ def crop_pad_2d(im, size, fill_value=0): Crop/pads `im` according to `size`. Padding will use `fill_value`. - Return's host/gpu array based on `im`. + Return's host/GPU array based on `im`. :param im: A >=2-dimensional numpy array :param size: Integer size of cropped/padded output @@ -421,7 +421,7 @@ def crop_pad_3d(vol, size, fill_value=0): Crop/pads `vol` according to `size`. Padding will use `fill_value`. - Return's host/gpu array based on `vol`. + Return's host/GPU array based on `vol`. :param vol: A >=3-dimensional numpy array :param size: Integer size of cropped/padded output diff --git a/tests/test_orient_sdp.py b/tests/test_orient_sdp.py index a161d2fdd7..22658ee06a 100644 --- a/tests/test_orient_sdp.py +++ b/tests/test_orient_sdp.py @@ -77,7 +77,7 @@ def test_estimate_rotations(src_orient_est_fixture): src, orient_est = src_orient_est_fixture if backend_available("cufinufft") and src.dtype == np.float32: - pytest.skip("CI on gpu fails for singles.") + pytest.skip("CI on GPU fails for singles.") orient_est.estimate_rotations() From 0c20e2ebfe014802855d597b2687ee580379d6e0 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 6 Jun 2024 15:42:33 -0400 Subject: [PATCH 098/139] Add epsilon arg to PowerFilter. --- src/aspire/operators/filters.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index 9b910a8fe0..bb7491c780 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -184,9 +184,19 @@ class PowerFilter(Filter): A Filter object that is composed of a regular `Filter` object, but evaluates it to a specified power. """ - def __init__(self, filter, power=1): + def __init__(self, filter, power=1, epsilon=None): + """ + Initialize PowerFilter instance. + + :param filter: A Filter instance. + :param power: Exponent to raise filter values. + :param epsilon: Threshold on filter values that get raised to a negative power. + `filter` values below this threshold will be set to zero during evaluation. + Default uses machine epsilon for filter.dtype. + """ self._filter = filter self._power = power + self._epsilon = epsilon super().__init__(dim=filter.dim, radial=filter.radial) def _evaluate(self, omega): @@ -204,7 +214,9 @@ def evaluate_grid(self, L, *args, dtype=np.float32, **kwargs): # Place safeguard on values below machine epsilon for negative powers. if self._power < 0: - eps = np.finfo(filter_vals.dtype).eps + eps = self._epsilon + if eps is None: + eps = np.finfo(filter_vals.dtype).eps condition = abs(filter_vals) < eps num_less_eps = np.count_nonzero(condition) if num_less_eps > 0: From 0ee7a52531405c496ee62f98db54dacdc0baeab9 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 6 Jun 2024 16:02:42 -0400 Subject: [PATCH 099/139] Add threshold to whiten function with matlab default. 
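
The PSD cutoff used here is the same `epsilon` added to `PowerFilter` in the
previous patch.  A minimal NumPy sketch of that safeguard's behavior; the
helper name is illustrative only, not the ASPIRE implementation:

    import numpy as np

    def safeguarded_power(vals, power, eps=None):
        # For negative powers, zero out near-zero entries instead of
        # amplifying them; positive powers are applied unchanged.
        if eps is None:
            eps = np.finfo(vals.dtype).eps
        out = np.array(vals, copy=True)
        if power < 0:
            small = np.abs(out) < eps
            out[small] = 0
            out[~small] = out[~small] ** power
        else:
            out = out**power
        return out

    # safeguarded_power(np.array([1.0, 1e-20]), -0.5) -> [1.0, 0.0],
    # rather than [1.0, 1e10] from a bare element-wise power.
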
--- src/aspire/source/image.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/aspire/source/image.py b/src/aspire/source/image.py index 473585acb9..f93a40440b 100644 --- a/src/aspire/source/image.py +++ b/src/aspire/source/image.py @@ -798,7 +798,7 @@ def downsample(self, L): self.L = L @_as_copy - def whiten(self, noise_estimate=None): + def whiten(self, noise_estimate=None, epsilon=None): """ Modify the `ImageSource` in-place by appending a whitening filter to the generation pipeline. @@ -810,6 +810,9 @@ def whiten(self, noise_estimate=None): passed a `NoiseEstimator` the `filter` attribute will be queried. Alternatively, the noise PSD may be passed directly as a `Filter` object. + :param epsilon: Threshold used to determine which frequencies to whiten + and which to set to zero. By default all filter values less than + 100*eps(self.dtype) are zeroed out. :return: On return, the `ImageSource` object has been modified in place. """ @@ -827,8 +830,15 @@ def whiten(self, noise_estimate=None): " instead of `NoiseEstimator` or `Filter`." ) + # Set threshold for whiten_filter. All values such that sqrt(noise_filter) < eps + # will be set to zero in the whiten_filter. + if epsilon is None: + epsilon = 100 * np.finfo(self.dtype).eps + logger.info("Whitening source object") - whiten_filter = PowerFilter(noise_filter, power=-0.5) + # epsilon is squared to account for the PowerFilter applying the threshold + # to noise_filter, not sqrt(noise_filter). + whiten_filter = PowerFilter(noise_filter, power=-0.5, epsilon=epsilon**2) logger.info("Transforming all CTF Filters into Multiplicative Filters") self.unique_filters = [ From d177574b776106b4f94617267dec5a8d914f3dc8 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Mon, 10 Jun 2024 15:27:08 -0400 Subject: [PATCH 100/139] Recast ArrayFilter result after scipy workaround upcast occurs. --- src/aspire/operators/filters.py | 3 ++- tests/test_filters.py | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index bb7491c780..df2d33b036 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -349,7 +349,8 @@ def _evaluate(self, omega): # Result is 1 x np.prod(self.sz) in shape; convert to a 1-d vector result = np.squeeze(result, 0) - return result + # Recast result with correct dtype + return result.astype(self.xfer_fn_array.dtype) def evaluate_grid(self, L, *args, dtype=np.float32, **kwargs): """ diff --git a/tests/test_filters.py b/tests/test_filters.py index 35d7955a9e..33f8dc7349 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -361,3 +361,19 @@ def test_power_filter_safeguard(dtype, caplog): # Check caplog for warning. msg = f"setting {num_eps} extremal filter value(s) to zero." assert msg in caplog.text + + +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_array_filter_dtype_passthrough(dtype): + """ + Do to a bug in scipy versions < 1.10.1, scipy's interpolator crashes + in singles. We have a workaround that upcasts to doubles. This test + ensures that we recast to the correct dtype during calculations. 
+ """ + L = 8 + arr = np.ones((L, L), dtype=dtype) + + filt = ArrayFilter(arr) + filt_vals = filt.evaluate_grid(L, dtype=dtype) + + assert filt_vals.dtype == dtype From 86486987edd488ffec733a495c9ce18ff66004fa Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Mon, 10 Jun 2024 16:03:53 -0400 Subject: [PATCH 101/139] Revert to original threshold of eps(dtype) on PSD. --- src/aspire/source/image.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/aspire/source/image.py b/src/aspire/source/image.py index f93a40440b..b23b5c98e4 100644 --- a/src/aspire/source/image.py +++ b/src/aspire/source/image.py @@ -811,8 +811,8 @@ def whiten(self, noise_estimate=None, epsilon=None): queried. Alternatively, the noise PSD may be passed directly as a `Filter` object. :param epsilon: Threshold used to determine which frequencies to whiten - and which to set to zero. By default all filter values less than - 100*eps(self.dtype) are zeroed out. + and which to set to zero. By default all PSD values in the `noise_estimate` + less than eps(self.dtype) are zeroed out in the whitening filter. :return: On return, the `ImageSource` object has been modified in place. """ @@ -830,15 +830,10 @@ def whiten(self, noise_estimate=None, epsilon=None): " instead of `NoiseEstimator` or `Filter`." ) - # Set threshold for whiten_filter. All values such that sqrt(noise_filter) < eps - # will be set to zero in the whiten_filter. - if epsilon is None: - epsilon = 100 * np.finfo(self.dtype).eps - logger.info("Whitening source object") # epsilon is squared to account for the PowerFilter applying the threshold # to noise_filter, not sqrt(noise_filter). - whiten_filter = PowerFilter(noise_filter, power=-0.5, epsilon=epsilon**2) + whiten_filter = PowerFilter(noise_filter, power=-0.5, epsilon=epsilon) logger.info("Transforming all CTF Filters into Multiplicative Filters") self.unique_filters = [ From 582077fefdf35ab4127e44422005f5942d53a1a1 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 11 Jun 2024 15:33:23 -0400 Subject: [PATCH 102/139] remove comment --- src/aspire/source/image.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/aspire/source/image.py b/src/aspire/source/image.py index b23b5c98e4..c60b64d701 100644 --- a/src/aspire/source/image.py +++ b/src/aspire/source/image.py @@ -831,8 +831,6 @@ def whiten(self, noise_estimate=None, epsilon=None): ) logger.info("Whitening source object") - # epsilon is squared to account for the PowerFilter applying the threshold - # to noise_filter, not sqrt(noise_filter). whiten_filter = PowerFilter(noise_filter, power=-0.5, epsilon=epsilon) logger.info("Transforming all CTF Filters into Multiplicative Filters") From 644de3c4b3b18e21a8ab29769db352d9fc266ce4 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 11 Jun 2024 15:44:35 -0400 Subject: [PATCH 103/139] set default epsilon inside whiten function. --- src/aspire/source/image.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/aspire/source/image.py b/src/aspire/source/image.py index c60b64d701..fa5be1f7f7 100644 --- a/src/aspire/source/image.py +++ b/src/aspire/source/image.py @@ -830,6 +830,9 @@ def whiten(self, noise_estimate=None, epsilon=None): " instead of `NoiseEstimator` or `Filter`." 
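
A rough usage sketch of the resulting call pattern, assuming the usual
`aspire.noise` / `aspire.source` import locations; the `Simulation`
stand-in, its parameters, and the explicit cutoff value are arbitrary, and
any sufficiently noisy `ImageSource` is used the same way:

    import numpy as np

    from aspire.noise import AnisotropicNoiseEstimator
    from aspire.source import Simulation

    src = Simulation(L=32, n=8, dtype=np.float64)
    noise_estimator = AnisotropicNoiseEstimator(src)

    # Default: PSD values below eps(src.dtype) are zeroed in the whitening filter.
    src_white = src.whiten(noise_estimator.filter)

    # Explicit, more aggressive cutoff.
    src_white = src.whiten(noise_estimator.filter, epsilon=0.01)
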
) + if epsilon is None: + epsilon = np.finfo(self.dtype).eps + logger.info("Whitening source object") whiten_filter = PowerFilter(noise_filter, power=-0.5, epsilon=epsilon) From 1c09a39e98252259d2dea62f86fca595b4ed78d3 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 18 Jun 2024 09:03:49 -0400 Subject: [PATCH 104/139] test PowerFilter argument --- tests/test_filters.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index 33f8dc7349..8d9cac9e29 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -1,3 +1,4 @@ +import itertools import logging import os.path from unittest import TestCase @@ -332,20 +333,26 @@ def testFilterSigns(self): self.assertTrue(np.allclose(sign_filter.evaluate(self.omega), signs)) -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_power_filter_safeguard(dtype, caplog): +params = list(itertools.product([np.float32, np.float64], [None, 0.01])) + + +@pytest.mark.parametrize("dtype, epsilon", params) +def test_power_filter_safeguard(dtype, epsilon, caplog): L = 25 arr = np.ones((L, L), dtype=dtype) # Set a few values below machine epsilon. num_eps = 3 - eps = np.finfo(dtype).eps + eps = epsilon + if eps is None: + eps = np.finfo(dtype).eps arr[L // 2, L // 2 : L // 2 + num_eps] = eps / 2 # For negative powers, values below machine eps will be set to zero. filt = PowerFilter( filter=ArrayFilter(arr), power=-0.5, + epsilon=epsilon, ) caplog.clear() From 61d79fb1268c94b0fd52426797b10e98723bfcf7 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 18 Jun 2024 09:18:42 -0400 Subject: [PATCH 105/139] smoke test for whiten epsilon param. --- tests/test_preprocess_pipeline.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_preprocess_pipeline.py b/tests/test_preprocess_pipeline.py index fb7d2427ec..a37b19210a 100644 --- a/tests/test_preprocess_pipeline.py +++ b/tests/test_preprocess_pipeline.py @@ -126,6 +126,15 @@ def testWhiten2(dtype): assert np.allclose(np.eye(2), corr_coef, atol=2e-1) +@pytest.mark.parametrize("dtype", [np.float32, np.float64]) +def test_whiten_epsilon(dtype): + """Smoke test for epsilon argument""" + L = 25 + sim = get_sim_object(L, dtype) + noise_estimator = AnisotropicNoiseEstimator(sim) + _ = sim.whiten(noise_estimator.filter, epsilon=0.01) + + @pytest.mark.parametrize("L, dtype", params) def testInvertContrast(L, dtype): sim1 = get_sim_object(L, dtype) From 75a232b2340caa6e23c24814ca3787ae8c8499c2 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 18 Jun 2024 15:38:45 -0400 Subject: [PATCH 106/139] bump scipy version. remove upcast. --- pyproject.toml | 2 +- src/aspire/operators/filters.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c9c25a9976..c674418ec1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "pyshtools<=4.10.4", # 4.11.7 might have a packaging bug "PyWavelets", "ray >= 2.9.2", - "scipy >= 1.10.0", + "scipy >= 1.10.1", "scikit-learn", "scikit-image", "setuptools >= 0.41", diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index df2d33b036..c93c34f9cb 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -333,8 +333,7 @@ def _evaluate(self, omega): # for values slightly outside the interpolation grid bounds. 
interpolator = RegularGridInterpolator( _input_pts, - # https://github.com/scipy/scipy/issues/17718 - self.xfer_fn_array.astype(np.float64), + self.xfer_fn_array, method="linear", bounds_error=False, fill_value=None, @@ -349,8 +348,8 @@ def _evaluate(self, omega): # Result is 1 x np.prod(self.sz) in shape; convert to a 1-d vector result = np.squeeze(result, 0) - # Recast result with correct dtype - return result.astype(self.xfer_fn_array.dtype) + # Scipy's interpolator will upcast singles. Recasting. + return result.astype(self.xfer_fn_array.dtype, copy=False) def evaluate_grid(self, L, *args, dtype=np.float32, **kwargs): """ From 53b06e368312ea1459af0817fc5af88970a6f87b Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 18 Jun 2024 16:18:30 -0400 Subject: [PATCH 107/139] test that whiten safeguard is actually working. --- tests/test_filters.py | 3 +-- tests/test_preprocess_pipeline.py | 26 +++++++++++++++++++++++--- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index 8d9cac9e29..8674f13486 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -373,8 +373,7 @@ def test_power_filter_safeguard(dtype, epsilon, caplog): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_array_filter_dtype_passthrough(dtype): """ - Do to a bug in scipy versions < 1.10.1, scipy's interpolator crashes - in singles. We have a workaround that upcasts to doubles. This test + scipy's interpolator will upcast singles. This test ensures that we recast to the correct dtype during calculations. """ L = 8 diff --git a/tests/test_preprocess_pipeline.py b/tests/test_preprocess_pipeline.py index a37b19210a..17a4407008 100644 --- a/tests/test_preprocess_pipeline.py +++ b/tests/test_preprocess_pipeline.py @@ -127,12 +127,32 @@ def testWhiten2(dtype): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) -def test_whiten_epsilon(dtype): - """Smoke test for epsilon argument""" +def test_whiten_safeguard(dtype): + """Test that whitening safeguard works as expected.""" L = 25 + epsilon = 0.02 sim = get_sim_object(L, dtype) noise_estimator = AnisotropicNoiseEstimator(sim) - _ = sim.whiten(noise_estimator.filter, epsilon=0.01) + sim = sim.whiten(noise_estimator.filter, epsilon=epsilon) + + # Get whitening_filter from generation pipeline. + whiten_filt = sim.generation_pipeline.xforms[0].filter.evaluate_grid(sim.L) + + # Generate whitening_filter without safeguard directly from noise_estimator. + filt_vals = noise_estimator.filter.xfer_fn_array + whiten_filt_unsafe = filt_vals**-0.5 + + # Get indices where safeguard should be applied + # and assert that they are not empty. + ind = np.where(filt_vals < epsilon) + np.testing.assert_array_less(0, len(ind[0])) + + # Check that whiten_filt and whiten_filt_unsafe agree up to safeguard indices. + disagree = np.where(whiten_filt != whiten_filt_unsafe) + np.testing.assert_array_equal(ind, disagree) + + # Check that whiten_filt is zero at safeguard indices. + np.testing.assert_allclose(whiten_filt[ind], 0.0) @pytest.mark.parametrize("L, dtype", params) From cf7a77855f64a77841937b428bbaa104471d936f Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 20 Jun 2024 09:27:20 -0400 Subject: [PATCH 108/139] use np.testing in suite that failed on arm. 
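
The move from bare `assert np.allclose(...)` to `np.testing.assert_allclose(...)`
is mostly about diagnostics: on failure the latter reports the mismatch count,
the maximum absolute and relative errors, and the offending entries, which is
what we want from CI logs on other architectures.  A small illustration with
toy values:

    import numpy as np

    a = np.array([1.0, 1.001])
    b = np.ones(2)

    # assert np.allclose(a, b)          -> bare AssertionError, no context
    # np.testing.assert_allclose(a, b)  -> fails with a full mismatch report
    np.testing.assert_allclose(a, b, atol=1e-2)  # passes within tolerance
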
--- tests/test_preprocess_pipeline.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/test_preprocess_pipeline.py b/tests/test_preprocess_pipeline.py index 17a4407008..d7416095b0 100644 --- a/tests/test_preprocess_pipeline.py +++ b/tests/test_preprocess_pipeline.py @@ -101,7 +101,7 @@ def testWhiten(dtype): corr_coef = np.corrcoef(imgs_wt[:, L - 1, L - 1], imgs_wt[:, L - 2, L - 1]) # correlation matrix should be close to identity - assert np.allclose(np.eye(2), corr_coef, atol=1e-1) + np.testing.assert_allclose(np.eye(2), corr_coef, atol=1e-1) # dtype of returned images should be the same assert dtype == imgs_wt.dtype @@ -123,7 +123,7 @@ def testWhiten2(dtype): corr_coef = np.corrcoef(imgs_wt[:, L - 1, L - 1], imgs_wt[:, L - 2, L - 1]) # Correlation matrix should be close to identity - assert np.allclose(np.eye(2), corr_coef, atol=2e-1) + np.testing.assert_allclose(np.eye(2), corr_coef, atol=2e-1) @pytest.mark.parametrize("dtype", [np.float32, np.float64]) @@ -167,7 +167,9 @@ def testInvertContrast(L, dtype): imgs2_rc = sim2.images[:num_images] # all images should be the same after inverting contrast - assert np.allclose(imgs1_rc.asnumpy(), imgs2_rc.asnumpy()) + np.testing.assert_allclose( + imgs1_rc.asnumpy(), imgs2_rc.asnumpy(), rtol=1e-05, atol=1e-08 + ) # dtype of returned images should be the same assert dtype == imgs1_rc.dtype assert dtype == imgs2_rc.dtype From f58976c6a5d7ddb72017ea3f67fe54526b407764 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 20 Jun 2024 09:38:47 -0400 Subject: [PATCH 109/139] use np.testing in test_FLEbasis2D.py --- tests/test_FLEbasis2D.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_FLEbasis2D.py b/tests/test_FLEbasis2D.py index 7d6b3f5c47..ffb1f8f7d1 100644 --- a/tests/test_FLEbasis2D.py +++ b/tests/test_FLEbasis2D.py @@ -142,7 +142,7 @@ def testMatchFBEvaluate(basis): fb_images = fb_basis.evaluate(coefs) fle_images = basis.evaluate(coefs) - assert np.allclose(fb_images._data, fle_images._data, atol=1e-4) + np.testing.assert_allclose(fb_images._data, fle_images._data, atol=1e-4) @pytest.mark.parametrize("basis", test_bases_match_fb, ids=show_fle_params) @@ -159,8 +159,8 @@ def testMatchFBDenseEvaluate(basis): fle_images = Image(fle_out.T.reshape(-1, basis.nres, basis.nres)).asnumpy() # Matrix column reording in match_fb mode flips signs of some of the basis functions - assert np.allclose(np.abs(fb_images), np.abs(fle_images), atol=1e-3) - assert np.allclose(fb_images, fle_images, atol=1e-3) + np.testing.assert_allclose(np.abs(fb_images), np.abs(fle_images), atol=1e-3) + np.testing.assert_allclose(fb_images, fle_images, atol=1e-3) @pytest.mark.parametrize("basis", test_bases_match_fb, ids=show_fle_params) @@ -177,7 +177,7 @@ def testMatchFBEvaluate_t(basis): fb_coefs = fb_basis.evaluate_t(images) fle_coefs = basis.evaluate_t(images) - assert np.allclose(fb_coefs, fle_coefs, atol=1e-4) + np.testing.assert_allclose(fb_coefs, fle_coefs, atol=1e-4) @pytest.mark.parametrize("basis", test_bases_match_fb, ids=show_fle_params) @@ -197,7 +197,7 @@ def testMatchFBDenseEvaluate_t(basis): fle_coefs = basis._create_dense_matrix().T @ vec.T # Matrix column reording in match_fb mode flips signs of some of the basis coefficients - assert np.allclose(np.abs(fb_coefs), np.abs(fle_coefs), atol=1e-4) + np.testing.assert_allclose(np.abs(fb_coefs), np.abs(fle_coefs), atol=1e-4) def testLowPass(): @@ -265,4 +265,4 @@ def testRadialConvolution(): convolution_fft_pad[L // 2 : L // 2 + L, L // 2 : 
L // 2 + L] ) - assert np.allclose(imgs_convolved_fle, imgs_convolved_slow, atol=1e-5) + np.testing.assert_allclose(imgs_convolved_fle, imgs_convolved_slow, atol=1e-5) From 9984cac8deb4fcaeffc4594d058278bc663ebaa7 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 20 Jun 2024 11:35:10 -0400 Subject: [PATCH 110/139] revert to upcasting. --- src/aspire/operators/filters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index c93c34f9cb..5720867e49 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -333,7 +333,7 @@ def _evaluate(self, omega): # for values slightly outside the interpolation grid bounds. interpolator = RegularGridInterpolator( _input_pts, - self.xfer_fn_array, + self.xfer_fn_array.astype(np.float64), method="linear", bounds_error=False, fill_value=None, From 6cba85883d05b252f75335f813608de29099eb60 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 20 Jun 2024 12:08:49 -0400 Subject: [PATCH 111/139] update comments --- src/aspire/operators/filters.py | 1 + tests/test_filters.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index 5720867e49..f11202d2f2 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -333,6 +333,7 @@ def _evaluate(self, omega): # for values slightly outside the interpolation grid bounds. interpolator = RegularGridInterpolator( _input_pts, + # scipy requires upcasting to use cython interpolator. self.xfer_fn_array.astype(np.float64), method="linear", bounds_error=False, diff --git a/tests/test_filters.py b/tests/test_filters.py index 8674f13486..83b68d86ca 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -373,7 +373,7 @@ def test_power_filter_safeguard(dtype, epsilon, caplog): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_array_filter_dtype_passthrough(dtype): """ - scipy's interpolator will upcast singles. This test + We upcast to use scipy's fast interpolator. This test ensures that we recast to the correct dtype during calculations. """ L = 8 From 6d18cec6e2b85eed9fbae02d11cf7a6537d34801 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Thu, 20 Jun 2024 13:28:50 -0400 Subject: [PATCH 112/139] remove scipy bump --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c674418ec1..c9c25a9976 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "pyshtools<=4.10.4", # 4.11.7 might have a packaging bug "PyWavelets", "ray >= 2.9.2", - "scipy >= 1.10.1", + "scipy >= 1.10.0", "scikit-learn", "scikit-image", "setuptools >= 0.41", From 5aeca45b6bfec814175c5be93ac1fa16d0bed5de Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Wed, 10 Jul 2024 13:50:36 -0400 Subject: [PATCH 113/139] Revert scipy workaround. --- src/aspire/operators/filters.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/aspire/operators/filters.py b/src/aspire/operators/filters.py index f11202d2f2..bb7491c780 100644 --- a/src/aspire/operators/filters.py +++ b/src/aspire/operators/filters.py @@ -333,7 +333,7 @@ def _evaluate(self, omega): # for values slightly outside the interpolation grid bounds. interpolator = RegularGridInterpolator( _input_pts, - # scipy requires upcasting to use cython interpolator. 
+ # https://github.com/scipy/scipy/issues/17718 self.xfer_fn_array.astype(np.float64), method="linear", bounds_error=False, @@ -349,8 +349,7 @@ def _evaluate(self, omega): # Result is 1 x np.prod(self.sz) in shape; convert to a 1-d vector result = np.squeeze(result, 0) - # Scipy's interpolator will upcast singles. Recasting. - return result.astype(self.xfer_fn_array.dtype, copy=False) + return result def evaluate_grid(self, L, *args, dtype=np.float32, **kwargs): """ From 598d4408fda2e8477f9b0d299369da46e59f647f Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Wed, 10 Jul 2024 14:01:42 -0400 Subject: [PATCH 114/139] xfail ArrayFilter test for singles. --- tests/test_filters.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index 83b68d86ca..f68e261025 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -373,9 +373,12 @@ def test_power_filter_safeguard(dtype, epsilon, caplog): @pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_array_filter_dtype_passthrough(dtype): """ - We upcast to use scipy's fast interpolator. This test - ensures that we recast to the correct dtype during calculations. + We upcast to use scipy's fast interpolator. We do not recast + on exit, so this is an expected fail for singles. """ + if dtype == np.float32: + pytest.xfail(reason="ArrayFilter currently upcasts singles.") + L = 8 arr = np.ones((L, L), dtype=dtype) From 440cda26db86adcd267a956aeefb8fb8c54837fb Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Fri, 12 Jul 2024 09:14:23 -0400 Subject: [PATCH 115/139] make xfail strict --- tests/test_filters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index f68e261025..e243fc0a18 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -377,7 +377,7 @@ def test_array_filter_dtype_passthrough(dtype): on exit, so this is an expected fail for singles. """ if dtype == np.float32: - pytest.xfail(reason="ArrayFilter currently upcasts singles.") + pytest.xfail(reason="ArrayFilter currently upcasts singles.", strict=True) L = 8 arr = np.ones((L, L), dtype=dtype) From 4be9032f585d294434d5b599091c2ed10f2cf836 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Fri, 12 Jul 2024 10:48:51 -0400 Subject: [PATCH 116/139] remove strict param --- tests/test_filters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index e243fc0a18..f68e261025 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -377,7 +377,7 @@ def test_array_filter_dtype_passthrough(dtype): on exit, so this is an expected fail for singles. """ if dtype == np.float32: - pytest.xfail(reason="ArrayFilter currently upcasts singles.", strict=True) + pytest.xfail(reason="ArrayFilter currently upcasts singles.") L = 8 arr = np.ones((L, L), dtype=dtype) From 4ecf0924e2f467bbaa616ad5e168f1a26e5f40e1 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 23 Jul 2024 08:44:42 -0400 Subject: [PATCH 117/139] Use pytest fixtures. 
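
One note on the `strict` parameter added and then removed in the two patches
above: `strict` is only accepted by the `xfail` marker, while the imperative
`pytest.xfail()` helper takes just a reason, which is why the argument was
dropped again.  A small sketch contrasting the two forms (the `DTYPE`
constant and test names are illustrative):

    import numpy as np
    import pytest

    DTYPE = np.float32

    @pytest.mark.xfail(reason="known upcast", strict=True)
    def test_marker_form():
        # Marker form: strict=True turns an unexpected pass into a failure.
        assert np.dtype(DTYPE) == np.float64

    def test_imperative_form():
        if DTYPE == np.float32:
            # Imperative form: only a reason is accepted here.
            pytest.xfail(reason="ArrayFilter currently upcasts singles.")
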
--- tests/test_filters.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index f68e261025..2abe391071 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -333,10 +333,20 @@ def testFilterSigns(self): self.assertTrue(np.allclose(sign_filter.evaluate(self.omega), signs)) -params = list(itertools.product([np.float32, np.float64], [None, 0.01])) +DTYPES = [np.float32, np.float64] +EPS = [None, 0.01] + + +@pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}", scope="module") +def dtype(request): + return request.param + + +@pytest.fixture(params=EPS, ids=lambda x: f"epsilon={x}", scope="module") +def epsilon(request): + return request.param -@pytest.mark.parametrize("dtype, epsilon", params) def test_power_filter_safeguard(dtype, epsilon, caplog): L = 25 arr = np.ones((L, L), dtype=dtype) @@ -370,7 +380,6 @@ def test_power_filter_safeguard(dtype, epsilon, caplog): assert msg in caplog.text -@pytest.mark.parametrize("dtype", [np.float32, np.float64]) def test_array_filter_dtype_passthrough(dtype): """ We upcast to use scipy's fast interpolator. We do not recast From 2368196839334183e077ff554f8a89ab05a3ff58 Mon Sep 17 00:00:00 2001 From: Josh Carmichael Date: Tue, 23 Jul 2024 08:48:15 -0400 Subject: [PATCH 118/139] remove unused import --- tests/test_filters.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_filters.py b/tests/test_filters.py index 2abe391071..911e3b347b 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -1,4 +1,3 @@ -import itertools import logging import os.path from unittest import TestCase From 3ae167f92857351bf813dc9ecae9c5ce88d50d69 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 24 Jul 2024 14:31:15 -0400 Subject: [PATCH 119/139] minimal patch to support cupy install and disabled cufinufft --- src/aspire/nufft/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index 07d92c736c..d38a0df96e 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -198,7 +198,7 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): adjoint = adjoint.real if real else adjoint - if cp and not _keep_on_gpu: + if cp and isinstance(adjoint, cp.ndarray) and not _keep_on_gpu: adjoint = adjoint.get() return adjoint @@ -259,7 +259,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): transform = transform.real if real else transform - if cp and not _keep_on_gpu: + if cp and isinstance(transform, cp.ndarray) and not _keep_on_gpu: transform = transform.get() return transform From 57d34e0e67cad122c722b61b158540c23859ef90 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Wed, 24 Jul 2024 15:53:32 -0400 Subject: [PATCH 120/139] skip enormous FFB2D test on GPU --- tests/test_FFBbasis2D.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/test_FFBbasis2D.py b/tests/test_FFBbasis2D.py index 8acf7201d1..c3ee42dd75 100644 --- a/tests/test_FFBbasis2D.py +++ b/tests/test_FFBbasis2D.py @@ -6,6 +6,7 @@ from scipy.special import jv from aspire.basis import Coef, FFBBasis2D +from aspire.nufft import all_backends from aspire.source import Simulation from aspire.utils.misc import grid_2d from aspire.volume import Volume @@ -126,6 +127,9 @@ def testShift(self, basis): params = [pytest.param(512, np.float32, marks=pytest.mark.expensive)] +@pytest.mark.skipif( + all_backends()[0] == "cufinufft", reason="Not enough memory to run 
via GPU" +) @pytest.mark.parametrize( "L, dtype", params, @@ -136,6 +140,7 @@ def testHighResFFBBasis2D(L, dtype): sim = Simulation( n=1, L=L, + C=1, dtype=dtype, amplitudes=1, offsets=0, @@ -149,4 +154,6 @@ def testHighResFFBBasis2D(L, dtype): # Mask to compare inside disk of radius 1. mask = grid_2d(L, normalized=True)["r"] < 1 - assert np.allclose(im_ffb.asnumpy()[0][mask], im.asnumpy()[0][mask], atol=1e-4) + np.testing.assert_allclose( + im_ffb.asnumpy()[0][mask], im.asnumpy()[0][mask], rtol=1e-05, atol=1e-4 + ) From 325b1decbcdac9a66e3a37b74f671423b5addfa6 Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 25 Jul 2024 10:36:14 -0400 Subject: [PATCH 121/139] simpler solution --- src/aspire/nufft/__init__.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/aspire/nufft/__init__.py b/src/aspire/nufft/__init__.py index d38a0df96e..23ebe2c115 100644 --- a/src/aspire/nufft/__init__.py +++ b/src/aspire/nufft/__init__.py @@ -3,6 +3,7 @@ import numpy as np from aspire import config +from aspire.numeric import xp from aspire.utils import LogFilterByCount, complex_type, real_type cp = None @@ -198,8 +199,8 @@ def anufft(sig_f, fourier_pts, sz, real=False, epsilon=1e-8): adjoint = adjoint.real if real else adjoint - if cp and isinstance(adjoint, cp.ndarray) and not _keep_on_gpu: - adjoint = adjoint.get() + if not _keep_on_gpu: + adjoint = xp.asnumpy(adjoint) return adjoint @@ -259,7 +260,7 @@ def nufft(sig_f, fourier_pts, real=False, epsilon=1e-8): transform = transform.real if real else transform - if cp and isinstance(transform, cp.ndarray) and not _keep_on_gpu: - transform = transform.get() + if not _keep_on_gpu: + transform = xp.asnumpy(transform) return transform From ff06daa8c6fb4186806544078d10f0d814fea0bd Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 25 Jul 2024 13:48:35 -0400 Subject: [PATCH 122/139] make the long workflow not so long --- .github/workflows/long_workflow.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/long_workflow.yml b/.github/workflows/long_workflow.yml index bf676b45fb..498d4a0535 100644 --- a/.github/workflows/long_workflow.yml +++ b/.github/workflows/long_workflow.yml @@ -33,8 +33,9 @@ jobs: cat ${WORK_DIR}/config.yaml - name: Run run: | + export OMP_NUM_THREADS=1 ASPIREDIR=${{ env.WORK_DIR }} python -c \ "import aspire; print(aspire.config['ray']['temp_dir'])" - ASPIREDIR=${{ env.WORK_DIR }} python -m pytest -m "expensive" --durations=0 + ASPIREDIR=${{ env.WORK_DIR }} python -m pytest -n8 -m "expensive" --durations=0 - name: Cleanup run: rm -rf ${{ env.WORK_DIR }} From 7ae2f27fc571c7239b40126656533ac7969408cf Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Thu, 25 Jul 2024 13:52:40 -0400 Subject: [PATCH 123/139] run long workflow on pull requests --- .github/workflows/long_workflow.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/long_workflow.yml b/.github/workflows/long_workflow.yml index 498d4a0535..ec5714a29e 100644 --- a/.github/workflows/long_workflow.yml +++ b/.github/workflows/long_workflow.yml @@ -1,14 +1,14 @@ name: ASPIRE Python Long Running Test Suite on: - push: - branches: - - 'main' - - 'develop' + pull_request: + types: [opened, synchronize, reopened, ready_for_review] jobs: expensive_tests: runs-on: self-hosted + # Only run on review ready pull_requests + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.draft == false }} timeout-minutes: 360 steps: - uses: actions/checkout@v4 From 
a1c072720d211e177e4e378ad0da3374fa6507ff Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Mon, 22 Jul 2024 13:51:54 -0400 Subject: [PATCH 124/139] Added backproject script and stub in the image folder --- src/aspire/image/line.py | 96 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 src/aspire/image/line.py diff --git a/src/aspire/image/line.py b/src/aspire/image/line.py new file mode 100644 index 0000000000..078e6b37c3 --- /dev/null +++ b/src/aspire/image/line.py @@ -0,0 +1,96 @@ +import aspire +import numpy as np + + +class Line: + def __init__(self, data, dtype = np.float64): + """ + Initialize a Line Object. Change later (similar intuition from Image class) + Question: Is it a line or collection of line object? + + :param data: Numpy array containing image data with shape + `(..., resolution, resolution)`. + :param dtype: Optionally cast `data` to this dtype. + Defaults to `data.dtype`. + """ + self.dtype = np.dtype(dtype) + if data.ndim == 2: + data = data[np.newaxis, :, :] + if data.ndim < 3: + raise('Projection Dimensions should be more than Three-Dimensions') + self._data = data.astype(self.dtype, copy=False) + self.stack_shape = self._data.shape[:-2] + self.n_images = self._data.shape[0] #broken for higher dimensional stacks + self.n_lines = self._data.shape[-2] + self.n_points = self._data.shape[-1] + # self.n_dim = (self._data.shape[1], self._data.shape[2]) + + def __str__(self): + return f"Line(n_images = {self.n_images}, n_points = {self.n_points})" + + @property + def stack(self): + return self.n_images + + # talk about angles and if they're supposed to be a certain input/output + # why this method and not another (explain design choices) + def back_project(self, angles): + """ + Back Projection Method for a single stack of lines. + + :param filter_name: string, optional + Filter used in frequency domain filtering. Assign None to use no filter. + :param angles: array + assuming not perfectly radial angles + :return: stack of reconstructed + """ + assert len(angles) == self.n_lines, "Angles must match the number of lines." + original_stack = self.stack_shape + n_angles = len(angles) + + ## our implementation + n_img, n_angles, n_rad = sinogram.shape + assert n_angles == len(angles), "gonna have a bad time" + L = n_rad + sinogram = np.fft.ifftshift(self.n_images, axes= -1) + sinogram_ft = np.fft.rfft(self.n_images, axis= -1) + + #grid generation + y_idx = np.fft.rfftfreq(n_rad) * np.pi * 2 + n_real_points = len(y_idx) + pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) + pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] + pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] + + imgs = aspire.nufft.anufft( + sinogram_ft.reshape(n_img, -1), + pts.reshape(2, n_real_points * len(angles)), + sz=(L, L), + real=True + ).reshape(n_img, L, L) + + return aspire.image.Image(imgs) + + def image_filter(self, filter_name, projections): + """ + Filter Method for projections. Will apply filter to line projection to get collection of projections (ramp, cosine, ... , etc.) + :param projections: Collection of line projections that need to be filtered. + :return: Filtered Projections. 
+ """ + if projections is None: + raise ValueError('The input projections must not be None') + + filter_types = ('ramp', 'shepp-i logan', 'cosine', 'hamming', 'hann', None) + if filter_name is not filter_type: + raise ValueError(f"Unknown filter: {filter_name}") + + # skimage filter + fourier_filter = _get_fourier_filter(projection_size_padded, filter_name) + projection = fft(img, axis=0) * fourier_filter + radon_filtered = np.real(ifft(projection, axis=0)[:img_shape, :]) + + """ + step 0: Look more into filter function from skimage + thoughts: apply filter to each point + """ + pass From 4ddbe8c6e18e70bbf7d5f40cee76e7985d552daf Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 23 Jul 2024 03:42:36 -0400 Subject: [PATCH 125/139] Added One-Dimension Test for Backproject --- src/aspire/image/line.py | 71 +++++++++++++++------------------------- tests/test_sinogram.py | 32 ++++++++++++++++++ 2 files changed, 59 insertions(+), 44 deletions(-) diff --git a/src/aspire/image/line.py b/src/aspire/image/line.py index 078e6b37c3..9f2b30c800 100644 --- a/src/aspire/image/line.py +++ b/src/aspire/image/line.py @@ -1,9 +1,19 @@ -import aspire +import logging +import os +from pathlib import Path + import numpy as np +import aspire +from aspire.image import Image +from aspire.nufft import anufft, nufft + +# noticed a lot of classes had these already, might be helpful for pathing, logging info, etc. (inc. os, logging) +logger = logging.getLogger(__name__) + class Line: - def __init__(self, data, dtype = np.float64): + def __init__(self, data, dtype=np.float64): """ Initialize a Line Object. Change later (similar intuition from Image class) Question: Is it a line or collection of line object? @@ -17,11 +27,11 @@ def __init__(self, data, dtype = np.float64): if data.ndim == 2: data = data[np.newaxis, :, :] if data.ndim < 3: - raise('Projection Dimensions should be more than Three-Dimensions') + raise ("Projection Dimensions should be more than Three-Dimensions") self._data = data.astype(self.dtype, copy=False) self.stack_shape = self._data.shape[:-2] - self.n_images = self._data.shape[0] #broken for higher dimensional stacks - self.n_lines = self._data.shape[-2] + self.n_images = self._data.shape[0] # broken for higher dimensional stacks + self.n_lines = self._data.shape[-2] self.n_points = self._data.shape[-1] # self.n_dim = (self._data.shape[1], self._data.shape[2]) @@ -32,65 +42,38 @@ def __str__(self): def stack(self): return self.n_images - # talk about angles and if they're supposed to be a certain input/output - # why this method and not another (explain design choices) def back_project(self, angles): """ Back Projection Method for a single stack of lines. - + :param filter_name: string, optional Filter used in frequency domain filtering. Assign None to use no filter. :param angles: array assuming not perfectly radial angles :return: stack of reconstructed """ - assert len(angles) == self.n_lines, "Angles must match the number of lines." 
- original_stack = self.stack_shape - n_angles = len(angles) - - ## our implementation + sinogram = self._data n_img, n_angles, n_rad = sinogram.shape - assert n_angles == len(angles), "gonna have a bad time" + assert n_angles == len( + angles + ), "Number of angles must match the number of projections" + L = n_rad - sinogram = np.fft.ifftshift(self.n_images, axes= -1) - sinogram_ft = np.fft.rfft(self.n_images, axis= -1) + sinogram = np.fft.ifftshift(sinogram, axes=-1) + sinogram_ft = np.fft.rfft(sinogram, axis=-1) - #grid generation + # grid generation with real points y_idx = np.fft.rfftfreq(n_rad) * np.pi * 2 n_real_points = len(y_idx) pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - + imgs = aspire.nufft.anufft( sinogram_ft.reshape(n_img, -1), pts.reshape(2, n_real_points * len(angles)), sz=(L, L), - real=True + real=True, ).reshape(n_img, L, L) - - return aspire.image.Image(imgs) - - def image_filter(self, filter_name, projections): - """ - Filter Method for projections. Will apply filter to line projection to get collection of projections (ramp, cosine, ... , etc.) - :param projections: Collection of line projections that need to be filtered. - :return: Filtered Projections. - """ - if projections is None: - raise ValueError('The input projections must not be None') - - filter_types = ('ramp', 'shepp-i logan', 'cosine', 'hamming', 'hann', None) - if filter_name is not filter_type: - raise ValueError(f"Unknown filter: {filter_name}") - # skimage filter - fourier_filter = _get_fourier_filter(projection_size_padded, filter_name) - projection = fft(img, axis=0) * fourier_filter - radon_filtered = np.real(ifft(projection, axis=0)[:img_shape, :]) - - """ - step 0: Look more into filter function from skimage - thoughts: apply filter to each point - """ - pass + return aspire.image.Image(imgs) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 56aa6776e0..6459145f52 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,6 +4,7 @@ from skimage.transform import radon from aspire.image import Image +from aspire.image.line import Line from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit @@ -136,3 +137,34 @@ def test_multidim(num_ang): reference_sinograms, axis=-1 ) np.testing.assert_array_less(_nrms, SK_TOL, "Error in image projections.") + + +def test_back_project_single(masked_image, num_ang): + """ + Test Line.backproject on a single stack of line projections or sinogram. Compares the reconstructed image to original image. + """ + # I'll be creating a sinogram representation of camera man + # reusing our grid fixture + # and testing the skimage's backwards project without a filter (note there currently is no filter so blurry + angles = np.linspace(0, 360, num_ang, endpoint=False) + rads = angles / 180 * np.pi + sinogram = Line(masked_image.project(rads)) + back_project = sinogram.back_project(num_ang) + + assert masked_img.shape == back_project.shape, "Shape must be the same." + + # no filter for now + sk_image_iradon = iradon(masked_image, theta=np.degrees(angles), filter_name=None) + + nrms = np.sqrt( + np.mean((sk_image_iradon - back_project) ** 2, axis=-1) + ) / np.linalg.norm(back_project, axis=-1) + np.testing.assert_array_less(nrms, SK_TOL, "Error in image reconstruction.") + + +def test_back_project_multidim(num_ang): + """ + Test Line.backproject on a stack of images. 
Extension of back_project_single but for multi-dimensional stacks. + """ + # assume once we can get this functioning for single stack + # we can get this working for multiple From 40d594cf2506a37ad355f3de01b7a8a667d25601 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 23 Jul 2024 11:12:00 -0400 Subject: [PATCH 126/139] Stashing --- src/aspire/image/line.py | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/aspire/image/line.py b/src/aspire/image/line.py index 9f2b30c800..435c0aff5e 100644 --- a/src/aspire/image/line.py +++ b/src/aspire/image/line.py @@ -2,18 +2,16 @@ import os from pathlib import Path -import numpy as np - -import aspire from aspire.image import Image from aspire.nufft import anufft, nufft +import aspire +import numpy as np # noticed a lot of classes had these already, might be helpful for pathing, logging info, etc. (inc. os, logging) logger = logging.getLogger(__name__) - class Line: - def __init__(self, data, dtype=np.float64): + def __init__(self, data, dtype = np.float64): """ Initialize a Line Object. Change later (similar intuition from Image class) Question: Is it a line or collection of line object? @@ -27,11 +25,11 @@ def __init__(self, data, dtype=np.float64): if data.ndim == 2: data = data[np.newaxis, :, :] if data.ndim < 3: - raise ("Projection Dimensions should be more than Three-Dimensions") + raise('Projection Dimensions should be more than Three-Dimensions') self._data = data.astype(self.dtype, copy=False) self.stack_shape = self._data.shape[:-2] - self.n_images = self._data.shape[0] # broken for higher dimensional stacks - self.n_lines = self._data.shape[-2] + self.n_images = self._data.shape[0] #broken for higher dimensional stacks + self.n_lines = self._data.shape[-2] self.n_points = self._data.shape[-1] # self.n_dim = (self._data.shape[1], self._data.shape[2]) @@ -45,7 +43,7 @@ def stack(self): def back_project(self, angles): """ Back Projection Method for a single stack of lines. - + :param filter_name: string, optional Filter used in frequency domain filtering. Assign None to use no filter. 
:param angles: array @@ -54,26 +52,25 @@ def back_project(self, angles): """ sinogram = self._data n_img, n_angles, n_rad = sinogram.shape - assert n_angles == len( - angles - ), "Number of angles must match the number of projections" + assert n_angles == len(angles), "Number of angles must match the number of projections" L = n_rad - sinogram = np.fft.ifftshift(sinogram, axes=-1) - sinogram_ft = np.fft.rfft(sinogram, axis=-1) + sinogram = np.fft.ifftshift(sinogram, axes= -1) + sinogram_ft = np.fft.rfft(sinogram, axis= -1) - # grid generation with real points + #grid generation with real points y_idx = np.fft.rfftfreq(n_rad) * np.pi * 2 n_real_points = len(y_idx) pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - + imgs = aspire.nufft.anufft( sinogram_ft.reshape(n_img, -1), pts.reshape(2, n_real_points * len(angles)), sz=(L, L), - real=True, + real=True ).reshape(n_img, L, L) - + return aspire.image.Image(imgs) + From 17b09dc4c73930870b0656293b5059c6fdb1cd10 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Thu, 25 Jul 2024 17:16:10 -0400 Subject: [PATCH 127/139] Fixed Scaling Issue with BackProject and Integrated NRMSE to One Stack Test --- src/aspire/image/line.py | 45 +++++++++++++------------ tests/test_sinogram.py | 72 +++++++++++++++++++++++++++++----------- 2 files changed, 77 insertions(+), 40 deletions(-) diff --git a/src/aspire/image/line.py b/src/aspire/image/line.py index 435c0aff5e..b68a84383d 100644 --- a/src/aspire/image/line.py +++ b/src/aspire/image/line.py @@ -1,17 +1,15 @@ import logging -import os -from pathlib import Path -from aspire.image import Image -from aspire.nufft import anufft, nufft -import aspire import numpy as np +import aspire + # noticed a lot of classes had these already, might be helpful for pathing, logging info, etc. (inc. os, logging) logger = logging.getLogger(__name__) + class Line: - def __init__(self, data, dtype = np.float64): + def __init__(self, data, dtype=np.float64): """ Initialize a Line Object. Change later (similar intuition from Image class) Question: Is it a line or collection of line object? @@ -25,13 +23,14 @@ def __init__(self, data, dtype = np.float64): if data.ndim == 2: data = data[np.newaxis, :, :] if data.ndim < 3: - raise('Projection Dimensions should be more than Three-Dimensions') + assert "Projection Dimensions should be more than Three-Dimensions" self._data = data.astype(self.dtype, copy=False) - self.stack_shape = self._data.shape[:-2] - self.n_images = self._data.shape[0] #broken for higher dimensional stacks - self.n_lines = self._data.shape[-2] - self.n_points = self._data.shape[-1] - # self.n_dim = (self._data.shape[1], self._data.shape[2]) + + # self.stack_shape = self._data.shape[:-2] + # self.n_images = self._data.shape[0] #broken for higher dimensional stacks + # self.n_lines = self._data.shape[-2] + # self.n_points = self._data.shape[-1] + # self.n_dim = (self._data.shape[1], self._data.shape[2]) def __str__(self): return f"Line(n_images = {self.n_images}, n_points = {self.n_points})" @@ -43,7 +42,7 @@ def stack(self): def back_project(self, angles): """ Back Projection Method for a single stack of lines. - + :param filter_name: string, optional Filter used in frequency domain filtering. Assign None to use no filter. 
:param angles: array @@ -52,25 +51,29 @@ def back_project(self, angles): """ sinogram = self._data n_img, n_angles, n_rad = sinogram.shape - assert n_angles == len(angles), "Number of angles must match the number of projections" + assert n_angles == len( + angles + ), "Number of angles must match the number of projections" L = n_rad - sinogram = np.fft.ifftshift(sinogram, axes= -1) - sinogram_ft = np.fft.rfft(sinogram, axis= -1) + sinogram = np.fft.ifftshift(sinogram, axes=-1) + sinogram_ft = np.fft.rfft(sinogram, axis=-1) - #grid generation with real points + # grid generation with real points y_idx = np.fft.rfftfreq(n_rad) * np.pi * 2 n_real_points = len(y_idx) pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - + imgs = aspire.nufft.anufft( sinogram_ft.reshape(n_img, -1), pts.reshape(2, n_real_points * len(angles)), sz=(L, L), - real=True + real=True, ).reshape(n_img, L, L) - + + # normalization which gives us roughly the same error regardless of angles + imgs = imgs / (n_real_points * len(angles)) + return aspire.image.Image(imgs) - diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 6459145f52..189a8266ae 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -1,15 +1,17 @@ import numpy as np import pytest from skimage import data -from skimage.transform import radon +from skimage.transform import iradon, radon from aspire.image import Image from aspire.image.line import Line from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit -# The same tolerance will be used in all scikit comparisons -SK_TOL = 0.005 +# The same tolerance will be used in all scikit forward and backward comparisons +SK_TOL_FORWARDPROJECT = 0.005 + +SK_TOL_BACKPROJECT = 0.2 IMG_SIZES = [ 511, @@ -95,7 +97,9 @@ def test_image_project(masked_image, num_ang): reference_sinogram, axis=-1 ) - np.testing.assert_array_less(nrms, SK_TOL, "Error in image projections.") + np.testing.assert_array_less( + nrms, SK_TOL_FORWARDPROJECT, "Error in image projections." + ) def test_multidim(num_ang): @@ -136,35 +140,65 @@ def test_multidim(num_ang): _nrms = np.sqrt(np.mean((s - reference_sinograms) ** 2, axis=-1)) / np.linalg.norm( reference_sinograms, axis=-1 ) - np.testing.assert_array_less(_nrms, SK_TOL, "Error in image projections.") + np.testing.assert_array_less( + _nrms, SK_TOL_FORWARDPROJECT, "Error in image projections." + ) def test_back_project_single(masked_image, num_ang): """ Test Line.backproject on a single stack of line projections or sinogram. Compares the reconstructed image to original image. """ - # I'll be creating a sinogram representation of camera man - # reusing our grid fixture - # and testing the skimage's backwards project without a filter (note there currently is no filter so blurry angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180 * np.pi - sinogram = Line(masked_image.project(rads)) - back_project = sinogram.back_project(num_ang) + sinogram_np = masked_image.project(rads) + sinogram = Line(sinogram_np) + back_project = sinogram.back_project(rads) - assert masked_img.shape == back_project.shape, "Shape must be the same." + assert masked_image.shape == back_project.shape, "The shape must be the same." 
- # no filter for now - sk_image_iradon = iradon(masked_image, theta=np.degrees(angles), filter_name=None) + # generate circular mask w/ radius 1 to reconstructed image + # aim to remove discrepencies for the edges of the image + g = grid_2d(sinogram_np.shape[2], normalized=True, shifted=True) + mask = g["r"] < 1 + our_back_project = back_project.asnumpy()[0] * mask + + # generating sci-kit image backproject method w/ no filter + sk_image_iradon = iradon(sinogram_np[0].T, theta=angles[::-1], filter_name=None) - nrms = np.sqrt( - np.mean((sk_image_iradon - back_project) ** 2, axis=-1) - ) / np.linalg.norm(back_project, axis=-1) - np.testing.assert_array_less(nrms, SK_TOL, "Error in image reconstruction.") + # we apply a normalized root mean square error on the images to find relative error to range of ref. image + # Note: toleranc is typically < 0.2 regardless of angles, pixels, etc. + nrmse = np.sqrt(np.mean((our_back_project - sk_image_iradon) ** 2)) / ( + np.max(sk_image_iradon - np.min(sk_image_iradon)) + ) + assert ( + nrmse < SK_TOL_BACKPROJECT + ), f"NRMSE is too high: {nrmse}, expected less than {SK_TOL_BACKPROJECT}" def test_back_project_multidim(num_ang): """ Test Line.backproject on a stack of images. Extension of back_project_single but for multi-dimensional stacks. """ - # assume once we can get this functioning for single stack - # we can get this working for multiple + L = 512 # pixels + n = 3 + m = 2 + + # Generate a mask + g = grid_2d(L, normalized=True, shifted=True) + mask = g["r"] < 1 + + # Generate images + imgs = Image(np.random.random((m, n, L, L))) * mask + + # Generate line project angles + angles = np.linspace(0, 360, num_ang, endpoint=False) + rads = angles / 180.0 * np.pi + s = imgs.project(rads) + sinogram = Line(s) + return sinogram + # back project + grid + + # sci-kit back project + + # compare nrmse for all images in the stack From d1c7fad773d248a167ba087aeac0bd38656bbe11 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 26 Jul 2024 11:11:13 -0400 Subject: [PATCH 128/139] fixed single back_project test --- tests/test_sinogram.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 189a8266ae..13b77c4a90 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -180,23 +180,11 @@ def test_back_project_multidim(num_ang): """ Test Line.backproject on a stack of images. Extension of back_project_single but for multi-dimensional stacks. """ - L = 512 # pixels - n = 3 - m = 2 - # Generate a mask - g = grid_2d(L, normalized=True, shifted=True) - mask = g["r"] < 1 # Generate images - imgs = Image(np.random.random((m, n, L, L))) * mask # Generate line project angles - angles = np.linspace(0, 360, num_ang, endpoint=False) - rads = angles / 180.0 * np.pi - s = imgs.project(rads) - sinogram = Line(s) - return sinogram # back project + grid # sci-kit back project From 9d6c2f8515421430c7cd48551c5727fa6832565e Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 26 Jul 2024 11:54:31 -0400 Subject: [PATCH 129/139] reorg Line to avoid circ import. 
Interop Image/Line classes --- src/aspire/image/image.py | 3 ++- src/aspire/line/__init__.py | 1 + src/aspire/{image => line}/line.py | 7 +++---- tests/test_sinogram.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 src/aspire/line/__init__.py rename src/aspire/{image => line}/line.py (94%) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index f03372b087..358a9f207d 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -8,6 +8,7 @@ from PIL import Image as PILImage from scipy.linalg import lstsq +import aspire.line import aspire.volume from aspire.nufft import anufft, nufft from aspire.numeric import fft, xp @@ -220,7 +221,7 @@ def project(self, angles): # Radon transform, output: (stack size, angles, points) image_rt = np.fft.fftshift(np.fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) image_rt = image_rt.reshape(*original_stack, n_angles, n_points) - return image_rt + return aspire.line.Line(image_rt) @property def res(self): diff --git a/src/aspire/line/__init__.py b/src/aspire/line/__init__.py new file mode 100644 index 0000000000..d3856d67ad --- /dev/null +++ b/src/aspire/line/__init__.py @@ -0,0 +1 @@ +from .line import Line diff --git a/src/aspire/image/line.py b/src/aspire/line/line.py similarity index 94% rename from src/aspire/image/line.py rename to src/aspire/line/line.py index b68a84383d..4b947592ed 100644 --- a/src/aspire/image/line.py +++ b/src/aspire/line/line.py @@ -2,9 +2,9 @@ import numpy as np -import aspire +import aspire.image +from aspire.nufft import anufft -# noticed a lot of classes had these already, might be helpful for pathing, logging info, etc. (inc. os, logging) logger = logging.getLogger(__name__) @@ -66,7 +66,7 @@ def back_project(self, angles): pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] - imgs = aspire.nufft.anufft( + imgs = anufft( sinogram_ft.reshape(n_img, -1), pts.reshape(2, n_real_points * len(angles)), sz=(L, L), @@ -75,5 +75,4 @@ def back_project(self, angles): # normalization which gives us roughly the same error regardless of angles imgs = imgs / (n_real_points * len(angles)) - return aspire.image.Image(imgs) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 13b77c4a90..41259ea563 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,7 +4,7 @@ from skimage.transform import iradon, radon from aspire.image import Image -from aspire.image.line import Line +from aspire.line import Line from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit From 5d5f9b789048f2e09a517cbc82176bcc79decc6c Mon Sep 17 00:00:00 2001 From: Garrett Wright Date: Fri, 26 Jul 2024 12:10:50 -0400 Subject: [PATCH 130/139] adjust tests towards Line/Image interop [skip ci] Co-authored-by: Marc Karimi --- tests/test_sinogram.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 41259ea563..f410940180 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -151,15 +151,15 @@ def test_back_project_single(masked_image, num_ang): """ angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180 * np.pi - sinogram_np = masked_image.project(rads) - sinogram = Line(sinogram_np) + sinogram = masked_image.project(rads) + sinogram_np = sinogram.asnumpy() back_project = sinogram.back_project(rads) assert masked_image.shape == back_project.shape, "The shape must be the same." 
# generate circular mask w/ radius 1 to reconstructed image # aim to remove discrepencies for the edges of the image - g = grid_2d(sinogram_np.shape[2], normalized=True, shifted=True) + g = grid_2d(back_project.resolution, normalized=True, shifted=True) mask = g["r"] < 1 our_back_project = back_project.asnumpy()[0] * mask From 916522644026483ff93dc1c9cd6e7932c625304b Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 26 Jul 2024 15:50:31 -0400 Subject: [PATCH 131/139] passing the 20/20 test cases and added Attributes + Methods to the Line class [need fix] --- src/aspire/line/line.py | 118 +++++++++++++++++++++++++++++++++++++--- tests/test_sinogram.py | 6 +- 2 files changed, 114 insertions(+), 10 deletions(-) diff --git a/src/aspire/line/line.py b/src/aspire/line/line.py index 4b947592ed..6a19f03b94 100644 --- a/src/aspire/line/line.py +++ b/src/aspire/line/line.py @@ -1,5 +1,8 @@ import logging +import os +from warnings import catch_warnings, filterwarnings, simplefilter, warn +import mrcfile import numpy as np import aspire.image @@ -12,7 +15,6 @@ class Line: def __init__(self, data, dtype=np.float64): """ Initialize a Line Object. Change later (similar intuition from Image class) - Question: Is it a line or collection of line object? :param data: Numpy array containing image data with shape `(..., resolution, resolution)`. @@ -25,15 +27,117 @@ def __init__(self, data, dtype=np.float64): if data.ndim < 3: assert "Projection Dimensions should be more than Three-Dimensions" self._data = data.astype(self.dtype, copy=False) + self.ndim = self._data.ndim + self.shape = self._data.shape + self.stack_shape = self._data.shape[:-2] + self.stack_n_dim = self._data.ndim - 2 # fix + self.n = np.product(self.stack_shape) # stack number + self.n_angles = self._data.shape[-1] # fix + self.n_radial_points = self._data.shape[-1] - # self.stack_shape = self._data.shape[:-2] - # self.n_images = self._data.shape[0] #broken for higher dimensional stacks - # self.n_lines = self._data.shape[-2] - # self.n_points = self._data.shape[-1] - # self.n_dim = (self._data.shape[1], self._data.shape[2]) + # Numpy interop + # https://numpy.org/devdocs/user/basics.interoperability.html#the-array-interface-protocol + self.__array_interface__ = self._data.__array_interface__ + self.__array__ = self._data + + def _check_key_dims(self, key): + if isinstance(key, tuple) and (len(key) > self._data.ndim): + raise ValueError( + f"Line stack_dim is {self.stack_n_dim}, slice length must be =< {self.n_dim}" + ) + + def __getitem__(self, key): + self._check_key_dims(key) + return self.__class__(self._data[key]) + + def __setitem__(self, key, value): + self._check_key_dims(key) + self._data[key] = value + + def stack_reshape(self, *args): + """ + Reshape the stack axis. + + :*args: Integer(s) or tuple describing the intended shape. + + :returns: Line instance + """ + + # If we're passed a tuple, use that + if len(args) == 1 and isinstance(args[0], tuple): + shape = args[0] + else: + # Otherwise use the variadic args + shape = args + + # Sanity check the size + if shape != (-1,) and np.prod(shape) != self.n: + raise ValueError( + f"Number of sinogram images {self.n_images} cannot be reshaped to {shape}." + ) + + return self.__class__(self._data.reshape(*shape, *self._data.shape[-2:])) + + def asnumpy(self): + """ + Return image data as a (, angles, radians) + read-only array view. 
+ + :return: read-only ndarray view + """ + + view = self._data.view() + view.flags.writeable = False + return view + + def copy(self): + return self.__class__(self._data.copy()) + + # fix later + def save(self, mrcs_filepath, overwrite=False): + if self.stack_ndim > 1: + raise NotImplementedError("`save` is currently limited to 1D image stacks.") + + with mrcfile.new(mrcs_filepath, overwrite=overwrite) as mrc: + # original input format (the image index first) + mrc.set_data(self._data.astype(np.float32)) + + # fix later + @staticmethod + def load(filepath, dtype=None): + """ + Load raw data from supported files. + + Currently MRC and TIFF are supported. + + :param filepath: File path (string). + :param dtype: Optionally force cast to `dtype`. + Default dtype is inferred from the file contents. + :return: numpy array of image data. + """ + + # Get the file extension + ext = os.path.splitext(filepath)[1] + + # On unsupported extension, raise with suggested file types + if ext not in Image.extensions: + raise RuntimeError( + f"Attempting to open unsupported file extension '{ext}', try {list(Image.extensions.keys())}." + ) + + # Call the appropriate file reader + im = Image.extensions[ext](filepath) + + # Attempt casting when user provides dtype + if dtype is not None: + im = im.astype(dtype, copy=False) + + # Return as Image instance + return Image(im) def __str__(self): - return f"Line(n_images = {self.n_images}, n_points = {self.n_points})" + # fix later + return f"Line(n_images = {self.n}, n_angles = {self.n_points}, n_radial_points = {self.n_radial_points})" @property def stack(self): diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index f410940180..e8dbf797df 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -93,9 +93,9 @@ def test_image_project(masked_image, num_ang): assert reference_sinogram.shape == (len(angles), ny), "Incorrect Shape" # compare project method on ski-image reference - nrms = np.sqrt(np.mean((s[0] - reference_sinogram) ** 2, axis=-1)) / np.linalg.norm( - reference_sinogram, axis=-1 - ) + nrms = np.sqrt( + np.mean((s[0]._data - reference_sinogram) ** 2, axis=-1) + ) / np.linalg.norm(reference_sinogram, axis=-1) np.testing.assert_array_less( nrms, SK_TOL_FORWARDPROJECT, "Error in image projections." 
From 1bca0a9a47ae2c30bd8d49fc4159dc2712aa1aab Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 30 Jul 2024 05:04:39 -0400 Subject: [PATCH 132/139] finished multidim test --- src/aspire/line/line.py | 67 ++++++++--------------------------------- tests/test_sinogram.py | 49 ++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 64 deletions(-) diff --git a/src/aspire/line/line.py b/src/aspire/line/line.py index 6a19f03b94..58cdad88a3 100644 --- a/src/aspire/line/line.py +++ b/src/aspire/line/line.py @@ -30,9 +30,9 @@ def __init__(self, data, dtype=np.float64): self.ndim = self._data.ndim self.shape = self._data.shape self.stack_shape = self._data.shape[:-2] - self.stack_n_dim = self._data.ndim - 2 # fix - self.n = np.product(self.stack_shape) # stack number - self.n_angles = self._data.shape[-1] # fix + self.stack_n_dim = self._data.ndim - 2 + self.n = np.product(self.stack_shape) + self.n_angles = self._data.shape[-2] self.n_radial_points = self._data.shape[-1] # Numpy interop @@ -93,50 +93,7 @@ def asnumpy(self): def copy(self): return self.__class__(self._data.copy()) - # fix later - def save(self, mrcs_filepath, overwrite=False): - if self.stack_ndim > 1: - raise NotImplementedError("`save` is currently limited to 1D image stacks.") - - with mrcfile.new(mrcs_filepath, overwrite=overwrite) as mrc: - # original input format (the image index first) - mrc.set_data(self._data.astype(np.float32)) - - # fix later - @staticmethod - def load(filepath, dtype=None): - """ - Load raw data from supported files. - - Currently MRC and TIFF are supported. - - :param filepath: File path (string). - :param dtype: Optionally force cast to `dtype`. - Default dtype is inferred from the file contents. - :return: numpy array of image data. - """ - - # Get the file extension - ext = os.path.splitext(filepath)[1] - - # On unsupported extension, raise with suggested file types - if ext not in Image.extensions: - raise RuntimeError( - f"Attempting to open unsupported file extension '{ext}', try {list(Image.extensions.keys())}." 
- ) - - # Call the appropriate file reader - im = Image.extensions[ext](filepath) - - # Attempt casting when user provides dtype - if dtype is not None: - im = im.astype(dtype, copy=False) - - # Return as Image instance - return Image(im) - def __str__(self): - # fix later return f"Line(n_images = {self.n}, n_angles = {self.n_points}, n_radial_points = {self.n_radial_points})" @property @@ -153,30 +110,30 @@ def back_project(self, angles): assuming not perfectly radial angles :return: stack of reconstructed """ - sinogram = self._data - n_img, n_angles, n_rad = sinogram.shape - assert n_angles == len( - angles + assert ( + len(angles) == self.n_angles ), "Number of angles must match the number of projections" - L = n_rad + original_stack_shape = self.stack_shape + sinogram = self.stack_reshape(-1) + L = self.n_radial_points sinogram = np.fft.ifftshift(sinogram, axes=-1) sinogram_ft = np.fft.rfft(sinogram, axis=-1) # grid generation with real points - y_idx = np.fft.rfftfreq(n_rad) * np.pi * 2 + y_idx = np.fft.rfftfreq(self.n_radial_points) * np.pi * 2 n_real_points = len(y_idx) pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] imgs = anufft( - sinogram_ft.reshape(n_img, -1), + sinogram_ft.reshape(self.n, -1), pts.reshape(2, n_real_points * len(angles)), sz=(L, L), real=True, - ).reshape(n_img, L, L) + ).reshape(self.n, L, L) # normalization which gives us roughly the same error regardless of angles imgs = imgs / (n_real_points * len(angles)) - return aspire.image.Image(imgs) + return aspire.image.Image(imgs).stack_reshape(original_stack_shape) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index e8dbf797df..f9a24b1590 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -71,7 +71,7 @@ def masked_image(dtype, img_size): # Image.project and compare results to skimage.radon -def test_image_project(masked_image, num_ang): +def test_project_single(masked_image, num_ang): """ Test Image.project on a single stack of images. Compares project method output with skimage project. """ @@ -102,7 +102,7 @@ def test_image_project(masked_image, num_ang): ) -def test_multidim(num_ang): +def test_project_multidim(num_ang): """ Test Image.project on stacks of images. Extension of test_image_project but for multi-dimensional stacks. """ @@ -167,7 +167,7 @@ def test_back_project_single(masked_image, num_ang): sk_image_iradon = iradon(sinogram_np[0].T, theta=angles[::-1], filter_name=None) # we apply a normalized root mean square error on the images to find relative error to range of ref. image - # Note: toleranc is typically < 0.2 regardless of angles, pixels, etc. + # Note: tolerance is typically < 0.2 regardless of angles, pixels, etc. nrmse = np.sqrt(np.mean((our_back_project - sk_image_iradon) ** 2)) / ( np.max(sk_image_iradon - np.min(sk_image_iradon)) ) @@ -178,15 +178,46 @@ def test_back_project_single(masked_image, num_ang): def test_back_project_multidim(num_ang): """ - Test Line.backproject on a stack of images. Extension of back_project_single but for multi-dimensional stacks. + Test Line.backproject on a stack of images. Extension of back_project_single but for multi-dimensional stacks. Similar to forward_multidim test. 
""" - # Generate a mask + L = 512 # pixels + n = 3 + m = 2 + + g = grid_2d(L, normalized=True, shifted=True) + mask = g["r"] < 1 # Generate images + imgs = Image(np.random.random((m, n, L, L))) * mask + angles = np.linspace(0, 360, num_ang, endpoint=False) + rads = angles / 180 * np.pi - # Generate line project angles - # back project + grid + # apply a forward project on the image, then backwards + ours_forward = imgs.project(rads) + ours_backward = ours_forward.back_project(rads) - # sci-kit back project + # Compare + reference_back_projects = np.empty((m, n, L, L)) + for i in range(m): + for j in range(n): + img = imgs[i, j] + # Compute the singleton case, and compare with stack. + single_sinogram = img.project(rads) + back_project = single_sinogram.back_project(rads) + + # These should be allclose up to determinism. + np.testing.assert_allclose(ours_backward[i, j : j + 1], back_project[0]) + + # Next individually compute sk's iradon transform for each image. + reference_back_projects[i, j] = iradon( + single_sinogram.asnumpy()[0].T, theta=angles[::-1], filter_name=None + ) - # compare nrmse for all images in the stack + # apply a mask, then find the NRMSE on the collection of images + # similar tolerance level to single project test + nrmse = np.sqrt( + np.mean((ours_backward.asnumpy() * mask - reference_back_projects) ** 2) + ) / (np.max(reference_back_projects) - np.min(reference_back_projects)) + assert ( + nrmse < SK_TOL_BACKPROJECT + ), f"NRMSE is too high for image ({i},{j}): {nrmse}, expected less than {SK_TOL_BACKPROJECT}" From f3a0925bea0e3453b41f60975b1ce484438448ff Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 30 Jul 2024 11:54:56 -0400 Subject: [PATCH 133/139] removed unused statements --- src/aspire/line/line.py | 3 --- tests/test_sinogram.py | 1 - 2 files changed, 4 deletions(-) diff --git a/src/aspire/line/line.py b/src/aspire/line/line.py index 58cdad88a3..f9c688f71d 100644 --- a/src/aspire/line/line.py +++ b/src/aspire/line/line.py @@ -1,8 +1,5 @@ import logging -import os -from warnings import catch_warnings, filterwarnings, simplefilter, warn -import mrcfile import numpy as np import aspire.image diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index f9a24b1590..9b27b10dca 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,7 +4,6 @@ from skimage.transform import iradon, radon from aspire.image import Image -from aspire.line import Line from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit From a1eba5b1aa182fd4bcec3130b8a2bf6badc541e6 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 30 Jul 2024 12:18:31 -0400 Subject: [PATCH 134/139] initial fft changes --- src/aspire/image/image.py | 14 +++++++------- src/aspire/numeric/cupy_fft.py | 12 ++++++++++++ src/aspire/numeric/scipy_fft.py | 9 +++++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 358a9f207d..7ce6686418 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -204,24 +204,24 @@ def project(self, angles): original_stack = self.stack_shape # 2-D grid - radial_idx = np.fft.rfftfreq(n_points) * np.pi * 2 + radial_idx = fft.rfftfreq(n_points) * xp.pi * 2 n_real_points = len(radial_idx) n_angles = len(angles) - pts = np.empty((2, n_angles, n_real_points), dtype=self.dtype) - pts[0] = radial_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] - pts[1] = radial_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] + pts = xp.empty((2, n_angles, 
n_real_points), dtype=self.dtype) + pts[0] = radial_idx[xp.newaxis, :] * xp.sin(angles)[:, xp.newaxis] + pts[1] = radial_idx[xp.newaxis, :] * xp.cos(angles)[:, xp.newaxis] pts = pts.reshape(2, n_real_points * n_angles) # compute the polar nufft (NUFFT) - image_ft = nufft(self.stack_reshape(-1)._data, pts).reshape( + image_ft = nufft(xp.asarray(self.stack_reshape(-1)._data), pts).reshape( self.n_images, n_angles, n_real_points ) # Radon transform, output: (stack size, angles, points) - image_rt = np.fft.fftshift(np.fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) + image_rt = fft.fftshift(fft.irfft(image_ft, n=n_points, axis=-1), axes=-1) image_rt = image_rt.reshape(*original_stack, n_angles, n_points) - return aspire.line.Line(image_rt) + return aspire.line.Line(xp.asnumpy(image_rt)) @property def res(self): diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index 6ad6a4e9da..ce537a1cba 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -100,3 +100,15 @@ def dct(self, x, **kwargs): @_preserve_host def idct(self, x, **kwargs): return cufft.idct(x, **kwargs) + + @_preserve_host + def rfftfreq(self, x, **kwargs): + return cufft.rfftfreq(x, **kwargs) + + @_preserve_host + def irfft(self, x, **kwargs): + return cufft.irfft(x, **kwargs) + + @_preserve_host + def rfft(self, x, **kwargs): + return cufft.rfft(x, **kwargs) diff --git a/src/aspire/numeric/scipy_fft.py b/src/aspire/numeric/scipy_fft.py index 3891d45671..0ef5c95f16 100644 --- a/src/aspire/numeric/scipy_fft.py +++ b/src/aspire/numeric/scipy_fft.py @@ -39,3 +39,12 @@ def dct(self, x, **kwargs): def idct(self, x, **kwargs): return sp.fft.idct(x, **kwargs) + + def rfftfreq(self, x, **kwargs): + return sp.fft.rfftfreq(x, **kwargs) + + def irfft(self, x, **kwargs): + return sp.fft.irfft(x, **kwargs) + + def rfft(self, x, **kwargs): + return sp.fft.rfft(x, **kwargs) From de5388f9b3418342dbbfc9ba1e56577cb43e410b Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 30 Jul 2024 12:38:37 -0400 Subject: [PATCH 135/139] stashing gpu fixes --- src/aspire/image/image.py | 1 + src/aspire/numeric/cupy_fft.py | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aspire/image/image.py b/src/aspire/image/image.py index 7ce6686418..0b74cfc3d0 100644 --- a/src/aspire/image/image.py +++ b/src/aspire/image/image.py @@ -207,6 +207,7 @@ def project(self, angles): radial_idx = fft.rfftfreq(n_points) * xp.pi * 2 n_real_points = len(radial_idx) n_angles = len(angles) + angles = xp.asarray(angles) pts = xp.empty((2, n_angles, n_real_points), dtype=self.dtype) pts[0] = radial_idx[xp.newaxis, :] * xp.sin(angles)[:, xp.newaxis] diff --git a/src/aspire/numeric/cupy_fft.py b/src/aspire/numeric/cupy_fft.py index ce537a1cba..b491a0dcd1 100644 --- a/src/aspire/numeric/cupy_fft.py +++ b/src/aspire/numeric/cupy_fft.py @@ -101,9 +101,8 @@ def dct(self, x, **kwargs): def idct(self, x, **kwargs): return cufft.idct(x, **kwargs) - @_preserve_host - def rfftfreq(self, x, **kwargs): - return cufft.rfftfreq(x, **kwargs) + def rfftfreq(self, n, **kwargs): + return cufft.rfftfreq(n, **kwargs) @_preserve_host def irfft(self, x, **kwargs): From 01d5995b5a50e4a8121edb2a9fd7aac601b33d93 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Tue, 30 Jul 2024 13:41:05 -0400 Subject: [PATCH 136/139] forward gpu --- src/aspire/config_default.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/config_default.yaml b/src/aspire/config_default.yaml index def78983c0..fed4cea50a 100644 
--- a/src/aspire/config_default.yaml +++ b/src/aspire/config_default.yaml @@ -1,9 +1,9 @@ version: 0.12.3 common: # numeric module to use - one of numpy/cupy - numeric: numpy + numeric: cupy # fft backend to use - one of pyfftw/scipy/cupy/mkl - fft: scipy + fft: cupy # Set cache directory for ASPIRE example data. # By default the cache location will be set by pooch.os_cache(), From b9bee171a82caba911e77ed448f7b56bbd5f04af Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 2 Aug 2024 00:52:32 -0400 Subject: [PATCH 137/139] changed backproject to run on gpu (cupy) --- src/aspire/line/line.py | 13 +++++++------ tests/test_sinogram.py | 15 ++++++++++----- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/aspire/line/line.py b/src/aspire/line/line.py index f9c688f71d..fb1b180a6b 100644 --- a/src/aspire/line/line.py +++ b/src/aspire/line/line.py @@ -4,6 +4,7 @@ import aspire.image from aspire.nufft import anufft +from aspire.numeric import fft, xp logger = logging.getLogger(__name__) @@ -114,15 +115,15 @@ def back_project(self, angles): original_stack_shape = self.stack_shape sinogram = self.stack_reshape(-1) L = self.n_radial_points - sinogram = np.fft.ifftshift(sinogram, axes=-1) - sinogram_ft = np.fft.rfft(sinogram, axis=-1) + sinogram = fft.ifftshift(sinogram, axes=-1) + sinogram_ft = fft.rfft(sinogram, axis=-1) # grid generation with real points - y_idx = np.fft.rfftfreq(self.n_radial_points) * np.pi * 2 + y_idx = xp.fft.rfftfreq(self.n_radial_points) * xp.pi * 2 n_real_points = len(y_idx) - pts = np.empty((2, len(angles), n_real_points), dtype=self.dtype) - pts[0] = y_idx[np.newaxis, :] * np.sin(angles)[:, np.newaxis] - pts[1] = y_idx[np.newaxis, :] * np.cos(angles)[:, np.newaxis] + pts = xp.empty((2, len(angles), n_real_points), dtype=self.dtype) + pts[0] = y_idx[xp.newaxis, :] * xp.sin(angles)[:, xp.newaxis] + pts[1] = y_idx[xp.newaxis, :] * xp.cos(angles)[:, xp.newaxis] imgs = anufft( sinogram_ft.reshape(self.n, -1), diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index 9b27b10dca..d186866012 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,6 +4,7 @@ from skimage.transform import iradon, radon from aspire.image import Image +from aspire.numeric import xp from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit @@ -146,9 +147,12 @@ def test_project_multidim(num_ang): def test_back_project_single(masked_image, num_ang): """ - Test Line.backproject on a single stack of line projections or sinogram. Compares the reconstructed image to original image. + Test Line.backproject on a single stack of line projections or sinogram. Compares the reconstructed image to original. """ - angles = np.linspace(0, 360, num_ang, endpoint=False) + angles = xp.asarray(np.linspace(0, 360, num_ang, endpoint=False)) + angles_np = xp.asnumpy( + angles + ) # skimage requires numpy array while we're using cupy arrays rads = angles / 180 * np.pi sinogram = masked_image.project(rads) sinogram_np = sinogram.asnumpy() @@ -163,7 +167,7 @@ def test_back_project_single(masked_image, num_ang): our_back_project = back_project.asnumpy()[0] * mask # generating sci-kit image backproject method w/ no filter - sk_image_iradon = iradon(sinogram_np[0].T, theta=angles[::-1], filter_name=None) + sk_image_iradon = iradon(sinogram_np[0].T, theta=angles_np[::-1], filter_name=None) # we apply a normalized root mean square error on the images to find relative error to range of ref. 
image # Note: tolerance is typically < 0.2 regardless of angles, pixels, etc. @@ -188,7 +192,8 @@ def test_back_project_multidim(num_ang): # Generate images imgs = Image(np.random.random((m, n, L, L))) * mask - angles = np.linspace(0, 360, num_ang, endpoint=False) + angles = xp.asarray(np.linspace(0, 360, num_ang, endpoint=False)) + angles_np = xp.asnumpy(angles) # need this for the skimage transformations rads = angles / 180 * np.pi # apply a forward project on the image, then backwards @@ -209,7 +214,7 @@ def test_back_project_multidim(num_ang): # Next individually compute sk's iradon transform for each image. reference_back_projects[i, j] = iradon( - single_sinogram.asnumpy()[0].T, theta=angles[::-1], filter_name=None + single_sinogram.asnumpy()[0].T, theta=angles_np[::-1], filter_name=None ) # apply a mask, then find the NRMSE on the collection of images From 0007b527e5c377a6ddb5377247f7e894d4076fb7 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 2 Aug 2024 11:20:04 -0400 Subject: [PATCH 138/139] revert config --- src/aspire/config_default.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aspire/config_default.yaml b/src/aspire/config_default.yaml index fed4cea50a..def78983c0 100644 --- a/src/aspire/config_default.yaml +++ b/src/aspire/config_default.yaml @@ -1,9 +1,9 @@ version: 0.12.3 common: # numeric module to use - one of numpy/cupy - numeric: cupy + numeric: numpy # fft backend to use - one of pyfftw/scipy/cupy/mkl - fft: cupy + fft: scipy # Set cache directory for ASPIRE example data. # By default the cache location will be set by pooch.os_cache(), From 8952f53830f32aeae9c8ff693f07b85d00e16126 Mon Sep 17 00:00:00 2001 From: Marc Karimi Date: Fri, 2 Aug 2024 12:47:05 -0400 Subject: [PATCH 139/139] fixed gpu issues --- src/aspire/line/line.py | 6 +++--- tests/test_sinogram.py | 13 ++++--------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/aspire/line/line.py b/src/aspire/line/line.py index fb1b180a6b..7787367c3c 100644 --- a/src/aspire/line/line.py +++ b/src/aspire/line/line.py @@ -113,13 +113,13 @@ def back_project(self, angles): ), "Number of angles must match the number of projections" original_stack_shape = self.stack_shape - sinogram = self.stack_reshape(-1) + sinogram = xp.asarray(self.stack_reshape(-1)._data) L = self.n_radial_points sinogram = fft.ifftshift(sinogram, axes=-1) sinogram_ft = fft.rfft(sinogram, axis=-1) # grid generation with real points - y_idx = xp.fft.rfftfreq(self.n_radial_points) * xp.pi * 2 + y_idx = fft.rfftfreq(self.n_radial_points) * xp.pi * 2 n_real_points = len(y_idx) pts = xp.empty((2, len(angles), n_real_points), dtype=self.dtype) pts[0] = y_idx[xp.newaxis, :] * xp.sin(angles)[:, xp.newaxis] @@ -134,4 +134,4 @@ def back_project(self, angles): # normalization which gives us roughly the same error regardless of angles imgs = imgs / (n_real_points * len(angles)) - return aspire.image.Image(imgs).stack_reshape(original_stack_shape) + return aspire.image.Image(xp.asnumpy(imgs)).stack_reshape(original_stack_shape) diff --git a/tests/test_sinogram.py b/tests/test_sinogram.py index d186866012..ddef7c480f 100644 --- a/tests/test_sinogram.py +++ b/tests/test_sinogram.py @@ -4,7 +4,6 @@ from skimage.transform import iradon, radon from aspire.image import Image -from aspire.numeric import xp from aspire.utils import grid_2d # Relative tolerance comparing line projections to scikit @@ -149,10 +148,7 @@ def test_back_project_single(masked_image, num_ang): """ Test Line.backproject on a single stack of 
line projections or sinogram. Compares the reconstructed image to original. """ - angles = xp.asarray(np.linspace(0, 360, num_ang, endpoint=False)) - angles_np = xp.asnumpy( - angles - ) # skimage requires numpy array while we're using cupy arrays + angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180 * np.pi sinogram = masked_image.project(rads) sinogram_np = sinogram.asnumpy() @@ -167,7 +163,7 @@ def test_back_project_single(masked_image, num_ang): our_back_project = back_project.asnumpy()[0] * mask # generating sci-kit image backproject method w/ no filter - sk_image_iradon = iradon(sinogram_np[0].T, theta=angles_np[::-1], filter_name=None) + sk_image_iradon = iradon(sinogram_np[0].T, theta=angles[::-1], filter_name=None) # we apply a normalized root mean square error on the images to find relative error to range of ref. image # Note: tolerance is typically < 0.2 regardless of angles, pixels, etc. @@ -192,8 +188,7 @@ def test_back_project_multidim(num_ang): # Generate images imgs = Image(np.random.random((m, n, L, L))) * mask - angles = xp.asarray(np.linspace(0, 360, num_ang, endpoint=False)) - angles_np = xp.asnumpy(angles) # need this for the skimage transformations + angles = np.linspace(0, 360, num_ang, endpoint=False) rads = angles / 180 * np.pi # apply a forward project on the image, then backwards @@ -214,7 +209,7 @@ def test_back_project_multidim(num_ang): # Next individually compute sk's iradon transform for each image. reference_back_projects[i, j] = iradon( - single_sinogram.asnumpy()[0].T, theta=angles_np[::-1], filter_name=None + single_sinogram.asnumpy()[0].T, theta=angles[::-1], filter_name=None ) # apply a mask, then find the NRMSE on the collection of images
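
A minimal usage sketch of the pipeline this series builds up, assuming the API at the tip of the branch (``Image.project`` returning a ``Line`` and ``Line.back_project`` returning an ``Image``). The image size and angle count below are illustrative, and the reconstruction is an unfiltered back-projection, so it is expected to be blurred relative to the input; the NRMSE is only reported here, not asserted.

import numpy as np

from aspire.image import Image
from aspire.utils import grid_2d

img_size = 128  # illustrative image size
num_ang = 180   # illustrative number of projection angles

# Mask the test image to the unit disk, as the tests above do before projecting.
g = grid_2d(img_size, normalized=True, shifted=True)
mask = g["r"] < 1
img = Image(np.random.random((1, img_size, img_size))) * mask

# Forward project: Image -> Line (sinogram), with angles given in radians.
rads = np.linspace(0, 2 * np.pi, num_ang, endpoint=False)
sinogram = img.project(rads)  # Line of shape (1, num_ang, img_size)

# Adjoint: Line -> Image, using the same angles (no ramp filter applied).
reco = sinogram.back_project(rads)

# Normalized RMSE against the original, mirroring the metric used in the tests.
diff = reco.asnumpy()[0] * mask - img.asnumpy()[0]
nrmse = np.sqrt(np.mean(diff**2)) / (img.asnumpy()[0].max() - img.asnumpy()[0].min())
print(f"Unfiltered back-projection NRMSE: {nrmse:.3f}")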