Skip to content

Commit

Permalink
Merge 6787053 into 2ce297b
Browse files Browse the repository at this point in the history
  • Loading branch information
aymgal committed May 5, 2021
2 parents 2ce297b + 6787053 commit ddd8c94
Show file tree
Hide file tree
Showing 21 changed files with 560 additions and 402 deletions.
14 changes: 8 additions & 6 deletions .travis.yml
Expand Up @@ -58,15 +58,17 @@ install:
- pip install -r test/test_requirements.txt
# - pip install python-pysap

# install the right branch of the fork of pysap
# # install the right branch of the fork of pysap
- cd $ORIGINAL_PATH
- cd ..
- mkdir $PACKAGES_DIR
- cd $PACKAGES_DIR
- git clone https://github.com/aymgal/pysap.git
- cd pysap
- git checkout dev-aym
- pip install .

# to install pySAP from a specific fork
# - cd $PACKAGES_DIR
# - git clone https://github.com/aymgal/pysap.git
# - cd pysap
# - git checkout master
# - pip install .
# - pip install -e git://github.com/aymgal/pysap@dev-aym#egg=pysap

# install the right branch of the fork of lenstronomy
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Expand Up @@ -5,5 +5,5 @@ matplotlib
future
astropy
pybind11
python-pysap
python-pysap==0.0.5
# tensorflow>=2.3.0 # only for SSIM computation
7 changes: 6 additions & 1 deletion slitronomy/Optimization/model_manager.py
Expand Up @@ -156,10 +156,14 @@ def data_coord2pix(self, ra, dec):

@property
def num_pix_image(self):
    """Side length (in pixels) of the image plane.

    Falls back to the size of the stored image data when no lensing
    operator has been configured (e.g. source-free / image-only setups).
    # NOTE(review): `len(self.image_data)` gives the first-axis size —
    # assumes square image data; confirm against callers.
    """
    lensing_op = self._lensing_op
    if lensing_op is not None:
        return lensing_op.imagePlane.num_pix
    return len(self.image_data)

@property
def num_pix_source(self):
    """Side length (in pixels) of the source plane grid.

    Returns None when no lensing operator has been configured, since
    no source plane exists in that case.
    """
    lensing_op = self._lensing_op
    return None if lensing_op is None else lensing_op.sourcePlane.num_pix

@property
Expand Down Expand Up @@ -188,7 +192,8 @@ def _set_point_source_mask(self, mask_list):
def _set_likelihood_mask(self, mask):
    """Store the likelihood mask and propagate it where needed.

    Keeps both the 2D mask and its flattened (1D) version, and forwards
    the mask to the lensing operator when one is configured.
    """
    self._mask_1d = util.image2array(mask)
    self._mask = mask
    lensing_op = self._lensing_op
    if lensing_op is not None:
        lensing_op.set_likelihood_mask(mask)

def _prepare_data(self):
num_pix_x, num_pix_y = self._data_class.num_pixel_axes
Expand Down
6 changes: 6 additions & 0 deletions slitronomy/Optimization/model_operators.py
Expand Up @@ -48,6 +48,8 @@ def M(self, image_2d):

def M_s(self, source_2d):
    """Apply the source-plane effective mask to `source_2d`.

    Identity when no lensing operator is configured (no source-plane
    mask is defined in that case).
    """
    lensing_op = self._lensing_op
    if lensing_op is not None:
        return lensing_op.sourcePlane.effective_mask * source_2d
    return source_2d

def H(self, array_2d):
Expand All @@ -64,10 +66,14 @@ def H_T(self, array_2d):

def F(self, source_2d):
    """Alias for lensing from source plane to image plane.

    Identity when no lensing operator is configured.
    """
    lensing_op = self._lensing_op
    return source_2d if lensing_op is None else lensing_op.source2image_2d(source_2d)

def F_T(self, image_2d):
    """Alias for ray-tracing from image plane back to source plane.

    Identity when no lensing operator is configured.
    """
    lensing_op = self._lensing_op
    return image_2d if lensing_op is None else lensing_op.image2source_2d(image_2d)

def R(self, image_2d):
Expand Down
2 changes: 2 additions & 0 deletions slitronomy/Optimization/noise_levels.py
Expand Up @@ -144,13 +144,15 @@ def update_image_levels(self, num_pix_image, wavelet_transform_image):
dirac = util.dirac_impulse(num_pix_image)
dirac_coeffs2 = wavelet_transform_image(dirac)**2

# TODO: if it happens that noise_map is a constant value, not need to initialise a full array
noise_map = self.effective_noise_map #self.noise_map

n_scale, n_pix1, npix2 = dirac_coeffs2.shape
noise_levels = np.zeros((n_scale, n_pix1, npix2))
for scale_idx in range(n_scale):
scale_power2 = np.sum(dirac_coeffs2[scale_idx, :, :])
noise_levels[scale_idx, :, :] = noise_map * np.sqrt(scale_power2)

self._noise_levels_img = noise_levels

def _initialise_regridding_error(self, data_image, image_pixel_scale, source_pixel_scale):
Expand Down
36 changes: 36 additions & 0 deletions slitronomy/Optimization/proximals.py
Expand Up @@ -37,3 +37,39 @@ def prox_positivity(image_input):
image = np.copy(image_input)
image[image < 0] = 0.
return image


def full_prox_sparsity_positivity(image, transform, inverse_transform,
                                  weights, noise_levels, thresh, thresh_increm,
                                  n_scales, l_norm, formulation, force_positivity):
    """Return the proximal operator of the sparsity regularisation term

        g = lambda * |Phi^T HG|_0   or   g = lambda * |Phi^T HG|_1

    optionally followed by a positivity constraint.

    :param image: 2D image ('analysis') or wavelet coefficients ('synthesis')
    :param transform: forward wavelet transform (used in 'analysis' only)
    :param inverse_transform: inverse wavelet transform (used in 'analysis' only)
    :param weights: per-coefficient weights multiplying the noise levels
    :param noise_levels: per-scale noise levels in transformed space
    :param thresh: base threshold (in units of noise levels)
    :param thresh_increm: extra threshold added to the first scale
    :param n_scales: number of decomposition scales
    :param l_norm: 0 or 1, selects hard or soft thresholding
    :param formulation: 'analysis' or 'synthesis'
    :param force_positivity: if True, project onto non-negative values
        ('analysis' formulation only)
    :raises ValueError: if `formulation` is not 'analysis' or 'synthesis'
        (previously this fell through to a confusing UnboundLocalError)
    """
    if formulation not in ('analysis', 'synthesis'):
        raise ValueError("formulation must be 'analysis' or 'synthesis', "
                         "got '{}'".format(formulation))

    level_const = thresh * np.ones(n_scales)
    level_const[0] += thresh_increm  # possibly a stronger threshold for first decomposition levels (small scales features)
    level_pixels = weights * noise_levels

    # in 'synthesis' formulation the input already lives in coefficient space
    coeffs = transform(image) if formulation == 'analysis' else image

    # apply proximal operator
    step = 1  # because threshold is already expressed in data units
    coeffs_proxed = prox_sparsity_wavelets(coeffs, step=step,
                                           level_const=level_const,
                                           level_pixels=level_pixels,
                                           l_norm=l_norm)
    if formulation == 'analysis':
        image_proxed = inverse_transform(coeffs_proxed)
    else:
        image_proxed = coeffs_proxed

    if force_positivity and formulation == 'analysis':
        image_proxed = prox_positivity(image_proxed)
    # TODO: apply positivity also in 'synthesis' formulation (i.e. to coeffs in starlet space?)

    return image_proxed

0 comments on commit ddd8c94

Please sign in to comment.