black formatting
Andrew McCluskey committed Apr 23, 2020
1 parent 2d1e0aa commit 015114b
Showing 11 changed files with 352 additions and 196 deletions.
13 changes: 2 additions & 11 deletions islatu/background.py
@@ -73,18 +73,9 @@ def fit_gaussian_2d(image, image_e, p0=None, bounds=None):
if bounds is None:
bounds = (
0,
[
image.shape[0],
image.shape[1],
100,
100,
image.max(),
image.max() * 10,
],
[image.shape[0], image.shape[1], 100, 100, image.max(), image.max() * 10,],
)
abscissa = np.array(
np.mgrid[0 : image.shape[0] : 1, 0 : image.shape[1] : 1]
)
abscissa = np.array(np.mgrid[0 : image.shape[0] : 1, 0 : image.shape[1] : 1])
# Perform the fitting
popt, pcov = curve_fit(
bivariate_normal,
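As a side note on usage, not part of the diff: the hunk above only reflows the default bounds and abscissa passed to scipy.optimize.curve_fit. A minimal sketch of how fit_gaussian_2d is typically called, mirroring the unit tests later in this commit; the synthetic image and the 10 % uncertainties are illustrative assumptions rather than values from the diff.

import numpy as np
from islatu import background

# Synthetic 10 x 10 image with a single bivariate-normal peak (same construction as the tests).
abscissa = np.array(np.mgrid[0:10:1, 0:10:1])
image = background.bivariate_normal(abscissa, 0, 0, 1, 1, 15, 10).reshape((10, 10))
image_e = image * 0.1  # assumed 10 % uncertainties

# With p0 and bounds left as None, the defaults reformatted above apply: peak centres are
# bounded by the image shape, widths by 100 pixels, and the two amplitude-like terms by
# image.max() and image.max() * 10.
result = background.fit_gaussian_2d(image, image_e)
print(result[0])  # fitted parameters as an uncertainties array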
11 changes: 7 additions & 4 deletions islatu/corrections.py
@@ -33,13 +33,14 @@ def footprint_correction(beam_width, sample_size, theta):
upper = (unp.nominal_values(length) + unp.std_devs(length)) / 2.0 / beam_sd
lower = (unp.nominal_values(length) - unp.std_devs(length)) / 2.0 / beam_sd
probability = 2.0 * (
unp.uarray(norm.cdf(mid), (norm.cdf(upper) - norm.cdf(lower)) / 2)
- 0.5
unp.uarray(norm.cdf(mid), (norm.cdf(upper) - norm.cdf(lower)) / 2) - 0.5
)
return probability


def get_interpolator(file_path, parser, q_axis_name='qdcd_', intensity_axis_name='adc2'):
def get_interpolator(
file_path, parser, q_axis_name="qdcd_", intensity_axis_name="adc2"
):
"""
Get an interpolator object from scipy, this is useful for the DCD q-normalisation step.
@@ -56,4 +57,6 @@ def get_interpolator(file_path, parser, q_axis_name='qdcd_', intensity_axis_name
- :py:attr:`int`: Degree of spline.
"""
normalisation_data = parser(file_path)[1]
return splrep(normalisation_data[q_axis_name], normalisation_data[intensity_axis_name])
return splrep(
normalisation_data[q_axis_name], normalisation_data[intensity_axis_name]
)
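As a usage note: get_interpolator returns the (knots, coefficients, degree) tuple from scipy.interpolate.splrep, which is later evaluated with scipy.interpolate.splev during the DCD q-normalisation. A minimal sketch, assuming a normalisation file readable by the i07_dat_parser used in the tests; the file name and q values are illustrative.

from scipy.interpolate import splev
from islatu import corrections, io

# Hypothetical DCD normalisation scan; any file the parser understands will do.
itp = corrections.get_interpolator("qdcd_norm.dat", io.i07_dat_parser)

# Evaluate the spline at measured q values to obtain the normalisation intensities.
q_values = [0.01, 0.02, 0.05]
print(splev(q_values, itp))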
4 changes: 1 addition & 3 deletions islatu/image.py
@@ -150,9 +150,7 @@ def crop(self, crop_function, **kwargs):
"""
self.array = unp.uarray(*crop_function(self.n, self.s, **kwargs))

def background_subtraction(
self, background_subtraction_function, **kwargs
):
def background_subtraction(self, background_subtraction_function, **kwargs):
"""
Perform a background subtraction based on some function.
84 changes: 53 additions & 31 deletions islatu/refl_data.py
@@ -6,14 +6,15 @@
# Distributed under the terms of the MIT License
# author: Andrew R. McCluskey

from os import path
from os import path
import numpy as np
from scipy.constants import physical_constants
from scipy.interpolate import splev
from uncertainties import ufloat
from uncertainties import unumpy as unp
from islatu import corrections, image, stitching


class Profile:
"""
This class stores information about the reflectometry profile.
@@ -29,7 +30,10 @@ class Profile:
q_axis_name (:py:attr:`str`, optional): Label for the q-axis in the scan. Defaults to :py:attr:`'q_axis_name'`.
theta_axis_name (:py:attr:`str`, optional): Label for the theta axis in the scan. Defaults to :py:attr:`'dcdtheta'`.
"""
def __init__(self, file_paths, parser, q_axis_name="qdcd", theta_axis_name="dcdtheta"):

def __init__(
self, file_paths, parser, q_axis_name="qdcd", theta_axis_name="dcdtheta"
):
self.scans = []
for f in file_paths:
self.scans.append(Scan(f, parser, q_axis_name, theta_axis_name))
@@ -75,7 +79,7 @@ def dq(self):
:py:attr:`array_like`: q-value uncertainties.
"""
return unp.std_devs(self.q_vectors)

def crop_and_bkg_sub(
self,
crop_function,
@@ -95,8 +99,10 @@
progress (:py:attr:`bool`, optional): Show a progress bar. Requires the :py:mod:`tqdm` package. Defaults to :py:attr:`True`.
"""
for s in self.scans:
s.crop_and_bkg_sub(crop_function, bkg_sub_function, crop_kwargs, bkg_sub_kwargs, progress)

s.crop_and_bkg_sub(
crop_function, bkg_sub_function, crop_kwargs, bkg_sub_kwargs, progress
)

def footprint_correction(self, beam_width, sample_size):
"""
Class method for :func:`~islatu.refl_data.Scan.footprint_correction` for each :py:class:`~Scan` in the list.
@@ -123,7 +129,7 @@ def q_uncertainty_from_pixel(
detector_distance=None,
energy=None,
pixel_size=172e-6,
):
):
"""
Class method for :func:`~islatu.refl_data.Scan.q_uncertainty_from_pixel` for each :py:class:`~Scan` in the list.
@@ -134,8 +140,10 @@
pixel_size (:py:attr:`float`, optional): Pixel size in metres
"""
for s in self.scans:
s.q_uncertainty_from_pixel(number_of_pixels, detector_distance, energy, pixel_size)

s.q_uncertainty_from_pixel(
number_of_pixels, detector_distance, energy, pixel_size
)

def qdcd_normalisation(self, itp):
"""
Class method for :func:`~islatu.refl_data.Scan.qdcd_normalisation` for each :py:class:`~Scan` in the list.
@@ -145,7 +153,7 @@
"""
for s in self.scans:
s.qdcd_normalisation(itp)

def concatenate(self):
"""
Class method for :func:`~islatu.stitching.concatenate`.
@@ -159,7 +167,9 @@ def normalise_ter(self, max_q=0.1):
Args:
max_q (:py:attr:`float`): The maximum q to be included in finding the critical angle.
"""
self.reflected_intensity = stitching.normalise_ter(self.q_vectors, self.reflected_intensity, max_q)
self.reflected_intensity = stitching.normalise_ter(
self.q_vectors, self.reflected_intensity, max_q
)

def rebin(self, new_q=None, number_of_q_vectors=400):
"""
@@ -169,8 +179,10 @@ def rebin(self, new_q=None, number_of_q_vectors=400):
new_q (:py:attr:`array_like`): Array of potential q-values. Defaults to :py:attr:`None`.
number_of_q_vectors (:py:attr:`int`, optional): The max number of q-vectors to be using initially in the rebinning of the data. Defaults to :py:attr:`400`.
"""
self.q_vectors, self.reflected_intensity = stitching.rebin(self.q_vectors, self.reflected_intensity, new_q, number_of_q_vectors)

self.q_vectors, self.reflected_intensity = stitching.rebin(
self.q_vectors, self.reflected_intensity, new_q, number_of_q_vectors
)


class Scan:
"""
@@ -191,33 +203,39 @@ class Scan:
q_axis_name (:py:attr:`str`, optional): Label for the q-axis in the scan. Defaults to :py:attr:`'q_axis_name'`.
theta_axis_name (:py:attr:`str`, optional): Label for the theta axis in the scan. Defaults to :py:attr:`'dcdtheta'`.
"""

def __init__(
self, file_path, parser, q_axis_name="qdcd", theta_axis_name="dcdtheta", energy=None
self,
file_path,
parser,
q_axis_name="qdcd",
theta_axis_name="dcdtheta",
energy=None,
):
self.file_path = file_path
self.metadata, self.data = parser(self.file_path)
if q_axis_name is None:
h = physical_constants["Planck constant in eV s"][0] * 1e-3
c = physical_constants["speed of light in vacuum"][0] * 1e10
if energy is None:
energy = self.metadata['dcm1energy'][0]
q = energy * 4 * np.pi * unp.sin(unp.radians(self.data[theta_axis_name])) / (h * c)
self.q = unp.uarray(
q, np.zeros(self.data[theta_axis_name].size)
energy = self.metadata["dcm1energy"][0]
q = (
energy
* 4
* np.pi
* unp.sin(unp.radians(self.data[theta_axis_name]))
/ (h * c)
)
self.q = unp.uarray(q, np.zeros(self.data[theta_axis_name].size))
else:
self.q = unp.uarray(
self.data[q_axis_name], np.zeros(self.data[q_axis_name].size)
)
self.data = self._check_files_exist()
self.theta = unp.uarray(
self.data[theta_axis_name],
np.zeros(self.data[theta_axis_name].size),
)
self.R = unp.uarray(
np.zeros(self.q.size),
np.zeros(self.q.size),
self.data[theta_axis_name], np.zeros(self.data[theta_axis_name].size),
)
self.R = unp.uarray(np.zeros(self.q.size), np.zeros(self.q.size),)
self.n_pixels = np.zeros(self.q.size)

def __str__(self):
@@ -227,7 +245,9 @@ def __str__(self):
Returns:
:py:attr:`str`: Description of scan.
"""
return 'The file: {} contains {} images from q = {:.4f} to q = {:.4f}.'.format(self.file_path, self.q.size, self.q[0].n, self.q[-1].n)
return "The file: {} contains {} images from q = {:.4f} to q = {:.4f}.".format(
self.file_path, self.q.size, self.q[0].n, self.q[-1].n
)

def __repr__(self):
"""
@@ -236,7 +256,7 @@ def __repr__(self):
Returns:
:py:attr:`str`: Description of scan.
"""
return self.__str__()
return self.__str__()

def _check_files_exist(self):
"""
@@ -261,10 +281,14 @@ def _check_files_exist(self):
im_file = self.data["file"][i].split(path.sep)[-1]
im_file = path.join(path.dirname(self.file_path), im_file)
if path.isfile(im_file):
self.data.iloc[i, self.data.keys().get_loc("file")] = im_file
self.data.iloc[i, self.data.keys().get_loc("file")] = im_file
continue
else:
raise FileNotFoundError("The following image file could not be found: {}.".format(self.data["file"][i]))
raise FileNotFoundError(
"The following image file could not be found: {}.".format(
self.data["file"][i]
)
)
return self.data

def crop_and_bkg_sub(
@@ -308,9 +332,7 @@ def footprint_correction(self, beam_width, sample_size):
sample_size (:py:class:`uncertainties.core.Variable`): Width of sample in the dimension of the beam, in metres.
theta (:py:attr:`float`): Incident angle, in degrees.
"""
self.R /= corrections.footprint_correction(
beam_width, sample_size, self.theta
)
self.R /= corrections.footprint_correction(beam_width, sample_size, self.theta)

def transmission_normalisation(self):
"""
@@ -385,4 +407,4 @@ def _get_iterator(q, progress):
"For the progress bar, you need to have the tqdm package "
"installed. No progress bar will be shown"
)
return iterator
return iterator
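For orientation, a sketch of how the Profile class reformatted above is typically driven end to end. The file names, beam geometry, and the choice of cropping and background routines are illustrative assumptions rather than values taken from this commit, and the ordering of the correction steps may differ between beamline setups.

from uncertainties import ufloat
from islatu import background, corrections, cropping, io
from islatu.refl_data import Profile

# Hypothetical reflectometry scans sharing one DCD normalisation file.
refl = Profile(["scan_404875.dat", "scan_404876.dat"], io.i07_dat_parser)

# Crop around the specular peak and subtract a fitted 2-D Gaussian background.
refl.crop_and_bkg_sub(cropping.crop_around_peak_2d, background.fit_gaussian_2d)

# Footprint correction for an assumed 100 um beam on a 10 mm sample.
refl.footprint_correction(ufloat(100e-6, 1e-7), ufloat(10e-3, 1e-5))

# DCD q-normalisation, stitching, normalisation of the total-external-reflection plateau, and rebinning.
refl.qdcd_normalisation(corrections.get_interpolator("qdcd_norm.dat", io.i07_dat_parser))
refl.concatenate()
refl.normalise_ter()
refl.rebin()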
4 changes: 1 addition & 3 deletions islatu/stitching.py
@@ -97,9 +97,7 @@ def rebin(q_vectors, reflected_intensity, new_q=None, number_of_q_vectors=400):
new_q = new_q
else:
new_q = np.logspace(
np.log10(q_vectors[0].n),
np.log10(q_vectors[-1].n),
number_of_q_vectors,
np.log10(q_vectors[0].n), np.log10(q_vectors[-1].n), number_of_q_vectors,
)

binned_q = unp.uarray(np.zeros_like(new_q), np.zeros_like(new_q))
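The hunk above only reflows the np.logspace call, but it is the whole of the default q-grid logic in rebin: when new_q is not supplied, 400 logarithmically spaced q values spanning the measured range are used. A standalone sketch with made-up endpoints:

import numpy as np

# Illustrative q range of a measured profile.
q_min, q_max = 0.018, 0.42
number_of_q_vectors = 400  # the default in islatu.stitching.rebin

new_q = np.logspace(np.log10(q_min), np.log10(q_max), number_of_q_vectors)
print(new_q[0], new_q[-1], new_q.size)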
17 changes: 5 additions & 12 deletions islatu/tests/test_background.py
@@ -17,23 +17,20 @@ class TestBackground(TestCase):
"""
Unit tests for background module
"""

def test_bivariate_normal(self):
"""
A simple test for the generation of a bivariate normal distribution.
"""
x_1 = np.linspace(-1, 1, 100)
x_2 = np.linspace(-1, 1, 100)
input_x = np.array([x_1, x_2])
abscissa = np.array(
np.mgrid[0 : len(input_x[0]) : 1, 0 : len(input_x[1]) : 1]
)
abscissa = np.array(np.mgrid[0 : len(input_x[0]) : 1, 0 : len(input_x[1]) : 1])

output = background.bivariate_normal(abscissa, 0, 0, 1, 1, 1, 10)
assert_equal(10000, output.size)
output = output.reshape((100, 100))
max_inten = np.unravel_index(
np.argmax(output, axis=None), output.shape
)
max_inten = np.unravel_index(np.argmax(output, axis=None), output.shape)
assert_equal([0, 0], max_inten)

def test_fit_gaussian_2d(self):
@@ -43,16 +40,12 @@ def test_fit_gaussian_2d(self):
x_1 = np.linspace(-1, 1, 10)
x_2 = np.linspace(-1, 1, 10)
input_x = np.array([x_1, x_2])
abscissa = np.array(
np.mgrid[0 : len(input_x[0]) : 1, 0 : len(input_x[1]) : 1]
)
abscissa = np.array(np.mgrid[0 : len(input_x[0]) : 1, 0 : len(input_x[1]) : 1])
to_fit = background.bivariate_normal(abscissa, 0, 0, 1, 1, 15, 10)
to_fit = to_fit.reshape((10, 10))
to_fit_e = to_fit * 0.1

result = background.fit_gaussian_2d(to_fit, to_fit_e)
assert_almost_equal(
unp.nominal_values(result[0]), [0, 0, 1, 1, 15, 10]
)
assert_almost_equal(unp.nominal_values(result[0]), [0, 0, 1, 1, 15, 10])
assert_equal(4, result[1])
assert_equal(2, result[2])
9 changes: 5 additions & 4 deletions islatu/tests/test_corrections.py
@@ -19,6 +19,7 @@ class TestCorrections(TestCase):
"""
Unit tests for corrections module
"""

def test_geometry_correction(self):
"""
Test the implementation of the geometry correction.
@@ -27,17 +28,17 @@
sample_size = ufloat(2e-3, 1e-5)
# The first value is below the spill over angle and the second is above
theta = np.array([0.01, 0.2])
result = corrections.footprint_correction(
beam_width, sample_size, theta
)
result = corrections.footprint_correction(beam_width, sample_size, theta)
assert_almost_equal(result[0].n, 0.006558435584346212)
assert_almost_equal(result[1].n, 0.1305814681032167)

def test_get_interpolator(self):
"""
Test the get interpolator
"""
file_name = path.join(path.dirname(islatu.__file__), 'tests/test_files/qdcd_norm.dat')
file_name = path.join(
path.dirname(islatu.__file__), "tests/test_files/qdcd_norm.dat"
)
itp = corrections.get_interpolator(file_name, io.i07_dat_parser)
assert_equal(isinstance(itp, tuple), True)
assert_equal(isinstance(itp[0], np.ndarray), True)
13 changes: 6 additions & 7 deletions islatu/tests/test_cropping.py
@@ -17,6 +17,7 @@ class TestCropping(unittest.TestCase):
"""
Unit tests for cropping module
"""

def test_crop_2d_a(self):
"""
Test crop_2d.
@@ -54,9 +55,7 @@ def test_crop_around_peak_2d_b(self):
initial_array[25, 25] = 100
expected_array = np.ones((20, 20))
expected_array[10, 10] = 100
result = cropping.crop_around_peak_2d(
initial_array
)
result = cropping.crop_around_peak_2d(initial_array)
assert_almost_equal(result, expected_array)

def test_crop_around_peak_2d_c(self):
@@ -67,9 +66,7 @@ def test_crop_around_peak_2d_c(self):
initial_array[25, 25] = 100
expected_array = np.ones((10, 20))
expected_array[5, 10] = 100
result = cropping.crop_around_peak_2d(
initial_array, x_size=10, y_size=20
)
result = cropping.crop_around_peak_2d(initial_array, x_size=10, y_size=20)
assert_almost_equal(result, expected_array)

def test_crop_around_peak_2d_error(self):
@@ -82,6 +79,8 @@ def test_crop_around_peak_2d_error(self):
expected_array = np.ones((10, 10))
expected_array[5, 5] = 100
expected_array_e = expected_array * 0.1
result = cropping.crop_around_peak_2d(initial_array, initial_array_e, x_size=10, y_size=10)
result = cropping.crop_around_peak_2d(
initial_array, initial_array_e, x_size=10, y_size=10
)
assert_almost_equal(result[0], expected_array)
assert_almost_equal(result[1], expected_array_e)