Commit 6290b9e

Merge pull request #1876 from jhlegarreta/FixTypos

DOC: Fix typos

skoudoro committed Jul 1, 2019
2 parents 7d5ccd7 + d1f0f34
Showing 48 changed files with 105 additions and 105 deletions.
2 changes: 1 addition & 1 deletion dipy/align/streamlinear.py
@@ -1001,7 +1001,7 @@ def check_range(streamline, gt=greater_than, lt=less_than):
# SLR on QuickBundles centroids and some thresholding see
# Garyfallidis et al. Recognition of white matter
# bundles using local and global streamline-based registration and
-# clustering, Neuroimage, 2017.
+# clustering, NeuroImage, 2017.
whole_brain_slr = slr_with_qbx


2 changes: 1 addition & 1 deletion dipy/align/tests/test_imaffine.py
@@ -321,7 +321,7 @@ def test_mi_gradient():
# Compute the gradient with the implementation under test
actual = mi_metric.gradient(theta)

-# Compute the gradient using finite-diferences
+# Compute the gradient using finite-differences
n = transform.get_number_of_parameters()
expected = np.empty(n, dtype=np.float64)

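The finite-difference gradient used in this test follows a standard central-difference pattern. A minimal generic sketch, assuming a scalar-valued objective f (illustrative only, not DIPY's implementation):

import numpy as np

def finite_difference_gradient(f, theta, h=1e-6):
    # Approximate the gradient of the scalar function f at theta
    # with central differences, perturbing one parameter at a time.
    theta = np.asarray(theta, dtype=np.float64)
    grad = np.empty_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = h
        grad[i] = (f(theta + step) - f(theta - step)) / (2 * h)
    return grad

# e.g. finite_difference_gradient(lambda t: np.sum(t**2), np.ones(3))
# is approximately [2., 2., 2.]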
4 changes: 2 additions & 2 deletions dipy/align/tests/test_imwarp.py
@@ -107,7 +107,7 @@ def test_diffeomorphic_map_2d():

# Now test the nearest neighbor interpolation
warped = diff_map.transform(moving_image, 'nearest')
-# compare the images (now we dont have to worry about precision,
+# compare the images (now we don't have to worry about precision,
# it is n.n.)
assert_array_almost_equal(warped, expected)

@@ -324,7 +324,7 @@ def test_optimizer_exceptions():
# Verify the default iterations list
assert_array_equal(optimizer.level_iters, [100, 100, 25])

-# Verify exception thrown when attepting to fit the energy profile without
+# Verify exception thrown when attempting to fit the energy profile without
# enough data
assert_raises(ValueError, optimizer._get_energy_derivative)

10 changes: 5 additions & 5 deletions dipy/align/tests/test_sumsqdiff.py
@@ -27,7 +27,7 @@ def iterate_residual_field_ssd_2d(delta_field, sigmasq_field, grad, target,
for r in range(nrows):
for c in range(ncols):
sigmasq = sigmasq_field[r, c] if sigmasq_field is not None else 1
-# This has to be done inside the neste loops because
+# This has to be done inside the nested loops because
# some d[...] may have been previously modified
nn = 0
y[:] = 0
@@ -212,12 +212,12 @@ def test_compute_residual_displacement_field_ssd_2d():
else:
expected = target.copy().astype(np.float64)

-# Expected residuals when sigma != infinte
+# Expected residuals when sigma != infinity
expected[inf_sigma == 0, 0] -= grad_G[inf_sigma == 0, 0] * \
dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, 0]
expected[inf_sigma == 0, 1] -= grad_G[inf_sigma == 0, 1] * \
dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, 1]
-# Expected residuals when sigma == infinte
+# Expected residuals when sigma == infinity
expected[inf_sigma == 1] = -1.0 * s[inf_sigma == 1]

# Test residual field computation starting with residual = None
@@ -355,11 +355,11 @@ def test_compute_residual_displacement_field_ssd_3d():
else:
expected = target.copy().astype(np.float64)

-# Expected residuals when sigma != infinte
+# Expected residuals when sigma != infinity
for i in range(3):
expected[inf_sigma == 0, i] -= grad_G[inf_sigma == 0, i] * \
dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, i]
-# Expected residuals when sigma == infinte
+# Expected residuals when sigma == infinity
expected[inf_sigma == 1] = -1.0 * s[inf_sigma == 1]

# Test residual field computation starting with residual = None
4 changes: 2 additions & 2 deletions dipy/align/tests/test_transforms.py
@@ -229,9 +229,9 @@ def test_invalid_transform():
# Note: users should not attempt to use the base class Transform:
# they should get an instance of one of its derived classes from the
# regtransforms dictionary (the base class is not contained there)
-# If for some reason the user instanciates it and attempts to use it,
+# If for some reason the user instantiates it and attempts to use it,
# however, it will raise exceptions when attempting to retrieve its
-# jacobian, identity parameters or its matrix representation. It will
+# Jacobian, identity parameters or its matrix representation. It will
# return -1 if queried about its dimension or number of parameters
transform = Transform()
theta = np.ndarray(3)
2 changes: 1 addition & 1 deletion dipy/core/tests/test_geometry.py
@@ -270,7 +270,7 @@ def test_perpendicular_directions():
for vector_v in vectors_v:
pd = perpendicular_directions(vector_v, num=num, half=False)

-# see if length of pd is equal to the number of intendend samples
+# see if length of pd is equal to the number of intended samples
assert_equal(num, len(pd))

# check if all directions are perpendicular to vector v
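The two properties asserted above can be exercised directly against the public API. A minimal usage sketch; the vector and sample count here are illustrative:

import numpy as np
from dipy.core.geometry import perpendicular_directions

v = np.array([1.0, 0.0, 0.0])
pd = perpendicular_directions(v, num=20, half=False)
assert len(pd) == 20  # one direction per requested sample
assert np.allclose(np.dot(pd, v), 0, atol=1e-8)  # all perpendicular to v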
6 changes: 3 additions & 3 deletions dipy/core/tests/test_gradients.py
@@ -152,7 +152,7 @@ def test_gradient_table_from_bvals_bvecs():
gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0)
npt.assert_array_equal(gt.bvecs, bvecs)

-# Bvalue > 0 for non-unit vector
+# b-value > 0 for non-unit vector
bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0]
npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals,
bvecs, b0_threshold=0.)
@@ -326,7 +326,7 @@ def test_nan_bvecs():
def test_generate_bvecs():
"""Tests whether we have properly generated bvecs.
"""
-# Test if the generated bvectors are unit vectors
+# Test if the generated b-vectors are unit vectors
bvecs = generate_bvecs(100)
norm = [np.linalg.norm(v) for v in bvecs]
npt.assert_almost_equal(norm, np.ones(100))
@@ -351,7 +351,7 @@ def test_round_bvals():
b = round_bvals(bvals, bmag=0)
npt.assert_array_almost_equal(bvals, b)

-# Case that b-valuea are in ms/um2
+# Case that b-values are in ms/um2
bvals = np.array([0.995, 0.995, 0.995, 0.995, 2.005, 2.005, 2.005, 2.005,
0])
b = round_bvals(bvals)
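The magnitude-based rounding exercised by test_round_bvals can be sketched in a few lines. This is an illustrative stand-in, assuming the default magnitude is one order below the largest b-value; it is not the exact round_bvals implementation:

import numpy as np

def round_bvals_sketch(bvals, bmag=None):
    # Round b-values to a common magnitude so that shells acquired
    # with slightly jittered b-values compare equal.
    bvals = np.asarray(bvals, dtype=np.float64)
    if bmag is None:
        bmag = int(np.log10(bvals.max())) - 1
    return np.round(bvals, -bmag)

# e.g. in ms/um2 units: round_bvals_sketch([0.995, 2.005, 0.0]) -> [1., 2., 0.]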
4 changes: 2 additions & 2 deletions dipy/denoise/noise_estimate.py
@@ -57,7 +57,7 @@ def piesno(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5,
reached if two subsequent estimates are smaller than eps.
return_mask : bool
-If True, return a mask identyfing all the pure noise voxel
+If True, return a mask identifying all the pure noise voxels
that were found.
Returns
@@ -171,7 +171,7 @@ def _piesno_3D(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5,
Default: 1e-5.
return_mask : bool (optional)
-If True, return a mask identyfing all the pure noise voxel
+If True, return a mask identifying all the pure noise voxels
that were found. Default: False.
initial_estimation : float (optional)
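For context, PIESNO is typically driven as below. A minimal usage sketch; the data array is a random placeholder and N stands for the number of receiver coils:

import numpy as np
from dipy.denoise.noise_estimate import piesno

data = np.random.rand(32, 32, 10, 60)  # placeholder 4D DWI volume
sigma, mask = piesno(data, N=4, return_mask=True)
print(sigma)       # estimated noise standard deviation
print(mask.sum())  # count of voxels identified as pure noise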
2 changes: 1 addition & 1 deletion dipy/denoise/shift_twist_convolution.pyx
@@ -179,7 +179,7 @@ cdef double [:, :, :, ::1] perform_convolution (double [:, :, :, ::1] odfs,

with nogil:

-# loop over ODFs cx,cy,cz,corient --> y and v
+# loop over ODFs cx,cy,cz,orient --> y and v
for corient in prange(OR1, schedule='guided'):
for cx in range(nx):
for cy in range(ny):
6 changes: 3 additions & 3 deletions dipy/denoise/tests/test_lpca.py
@@ -27,7 +27,7 @@ def rfiw_phantom(gtab, snr=None):
slice_ind[3, 4:7, :] = 8
slice_ind[3, 7, :] = 9

-# Define tisse diffusion parameters
+# Define tissue diffusion parameters
# Restricted diffusion
ADr = 0.99e-3
RDr = 0.0
@@ -54,7 +54,7 @@ def rfiw_phantom(gtab, snr=None):
# tissue volume fractions have to be adjusted to the measured f values when
# a constant S0 is assumed. With this correction, simulations will
# be analogous to simulating S0 values that differ for each medium. (For more
-# datails on this contact the phantom designer)
+# details on this contact the phantom designer)
f1 = f * S1 / S0

mevals = np.array([[ADr, RDr, RDr], [ADh, RDh, RDh],
@@ -189,7 +189,7 @@ def test_phantom():
gtab = gen_gtab()
DWI_clean = rfiw_phantom(gtab, snr=None)
DWI, sigma = rfiw_phantom(gtab, snr=30)
-# To test without rician correction
+# To test without Rician correction
temp = (DWI_clean / sigma)**2
DWI_clean_wrc = (sigma * np.sqrt(np.pi / 2) * np.exp(-0.5 * temp) *
((1 + 0.5 * temp) * sps.iv(0, 0.25 * temp) + 0.5 * temp *
2 changes: 1 addition & 1 deletion dipy/direction/tests/test_pmf.py
@@ -67,7 +67,7 @@ def test_boot_pmf():
npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0])
npt.assert_array_almost_equal(no_boot_pmf, model_pmf)

-# test model sherical harminic order different than bootstrap order
+# test model spherical harmonic order different than bootstrap order
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
28 changes: 14 additions & 14 deletions dipy/reconst/dki.py
@@ -240,7 +240,7 @@ def _F1m(a, b, c):
# Only computes F1 in voxels that have all eigenvalues larger than zero
cond0 = _positive_evals(a, b, c)

-# Apply formula for non problematic plaussible cases, i.e. a!=b and a!=c
+# Apply formula for non problematic plausible cases, i.e. a!=b and a!=c
cond1 = np.logical_and(cond0, np.logical_and(abs(a - b) >= a * er,
abs(a - c) >= a * er))
if np.sum(cond1) != 0:
@@ -254,23 +254,23 @@ def _F1m(a, b, c):
(3 * L1**2 - L1 * L2 - L1 * L3 - L2 * L3) /
(3 * L1 * np.sqrt(L2 * L3)) * RDm - 1)

-# Resolve possible sigularity a==b
+# Resolve possible singularity a==b
cond2 = np.logical_and(cond0, np.logical_and(abs(a - b) < a * er,
abs(a - c) > a * er))
if np.sum(cond2) != 0:
L1 = (a[cond2] + b[cond2]) / 2.
L3 = c[cond2]
F1[cond2] = _F2m(L3, L1, L1) / 2.

-# Resolve possible sigularity a==c
+# Resolve possible singularity a==c
cond3 = np.logical_and(cond0, np.logical_and(abs(a - c) < a * er,
abs(a - b) > a * er))
if np.sum(cond3) != 0:
L1 = (a[cond3] + c[cond3]) / 2.
L2 = b[cond3]
F1[cond3] = _F2m(L2, L1, L1) / 2

-# Resolve possible sigularity a==b and a==c
+# Resolve possible singularity a==b and a==c
cond4 = np.logical_and(cond0, np.logical_and(abs(a - c) < a * er,
abs(a - b) < a * er))
if np.sum(cond4) != 0:
@@ -328,7 +328,7 @@ def _F2m(a, b, c):
# Only computes F2 in voxels that have all eigenvalues larger than zero
cond0 = _positive_evals(a, b, c)

-# Apply formula for non problematic plaussible cases, i.e. b!=c
+# Apply formula for non problematic plausible cases, i.e. b!=c
cond1 = np.logical_and(cond0, (abs(b - c) > b * er))
if np.sum(cond1) != 0:
L1 = a[cond1]
@@ -340,14 +340,14 @@ def _F2m(a, b, c):
(((L2 + L3) / (np.sqrt(L2 * L3))) * RF +
((2. * L1 - L2 - L3) / (3. * np.sqrt(L2 * L3))) * RD - 2.)

-# Resolve possible sigularity b==c
+# Resolve possible singularity b==c
cond2 = np.logical_and(cond0, np.logical_and(abs(b - c) < b * er,
abs(a - b) > b * er))
if np.sum(cond2) != 0:
L1 = a[cond2]
L3 = (c[cond2] + b[cond2]) / 2.

-# Cumpute alfa [1]_
+# Compute alfa [1]_
x = 1. - (L1 / L3)
alpha = np.zeros(len(L1))
for i in range(len(x)):
@@ -360,7 +360,7 @@ def _F2m(a, b, c):
6. * ((L1 + 2. * L3)**2) / (144. * L3**2 * (L1 - L3)**2) * \
(L3 * (L1 + 2. * L3) + L1 * (L1 - 4. * L3) * alpha)

-# Resolve possible sigularity a==b and a==c
+# Resolve possible singularity a==b and a==c
cond3 = np.logical_and(cond0, np.logical_and(abs(b - c) < b * er,
abs(a - b) < b * er))
if np.sum(cond3) != 0:
@@ -794,7 +794,7 @@ def _G1m(a, b, c):
# Only computes G1 in voxels that have all eigenvalues larger than zero
cond0 = _positive_evals(a, b, c)

-# Apply formula for non problematic plaussible cases, i.e. b!=c
+# Apply formula for non problematic plausible cases, i.e. b!=c
cond1 = np.logical_and(cond0, (abs(b - c) > er))
if np.sum(cond1) != 0:
L1 = a[cond1]
Expand All @@ -804,7 +804,7 @@ def _G1m(a, b, c):
(L1 + L2 + L3)**2 / (18 * L2 * (L2 - L3)**2) * \
(2. * L2 + (L3**2 - 3 * L2 * L3) / np.sqrt(L2 * L3))

-# Resolve possible sigularity b==c
+# Resolve possible singularity b==c
cond2 = np.logical_and(cond0, abs(b - c) < er)
if np.sum(cond2) != 0:
L1 = a[cond2]
@@ -849,7 +849,7 @@ def _G2m(a, b, c):
kurtosis imaging. Magn Reson Med. 65(3), 823-836
"""
# Float error used to compare two floats, abs(l1 - l2) < er for l1 = l2
-# Error is defined as five order of magnitude larger than system's epslon
+# Error is defined as five orders of magnitude larger than system's epsilon
er = np.finfo(a.ravel()[0]).eps * 1e5

# Initialize G2
@@ -858,7 +858,7 @@ def _G2m(a, b, c):
# Only computes G2 in voxels that have all eigenvalues larger than zero
cond0 = _positive_evals(a, b, c)

-# Apply formula for non problematic plaussible cases, i.e. b!=c
+# Apply formula for non problematic plausible cases, i.e. b!=c
cond1 = np.logical_and(cond0, (abs(b - c) > er))
if np.sum(cond1) != 0:
L1 = a[cond1]
Expand All @@ -868,7 +868,7 @@ def _G2m(a, b, c):
(L1 + L2 + L3)**2 / (3 * (L2 - L3)**2) * \
((L2 + L3) / np.sqrt(L2 * L3) - 2)

-# Resolve possible sigularity b==c
+# Resolve possible singularity b==c
cond2 = np.logical_and(cond0, abs(b - c) < er)
if np.sum(cond2) != 0:
L1 = a[cond2]
@@ -1824,7 +1824,7 @@ def wls_fit_dki(design_matrix, data):

tol = 1e-6

-# preparing data and initializing parametres
+# preparing data and initializing parameters
data = np.asarray(data)
data_flat = data.reshape((-1, data.shape[-1]))
dki_params = np.empty((len(data_flat), 27))
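The reshaping at the top of wls_fit_dki is a common DIPY idiom: flatten the spatial dimensions, fit voxel by voxel, then restore the grid. A minimal sketch with the per-voxel fit elided (the zero fill is a placeholder):

import numpy as np

data = np.zeros((4, 4, 4, 32))                  # placeholder 4D dataset
data_flat = data.reshape((-1, data.shape[-1]))  # (n_voxels, n_signals)
dki_params = np.empty((len(data_flat), 27))
for v in range(len(data_flat)):
    dki_params[v] = 0.0  # placeholder for the per-voxel WLS fit
dki_params = dki_params.reshape(data.shape[:-1] + (27,))  # back to the grid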
2 changes: 1 addition & 1 deletion dipy/reconst/dki_micro.py
@@ -134,7 +134,7 @@ def diffusion_components(dki_params, sphere='repulsion100', awf=None,
edt_all = np.zeros(shape + (6,))
idt_all = np.zeros(shape + (6,))

-# Generate matrix that converts apparant diffusion coefficients to tensors
+# Generate matrix that converts apparent diffusion coefficients to tensors
B = np.zeros((sphere.x.size, 6))
B[:, 0] = sphere.x * sphere.x # Bxx
B[:, 1] = sphere.x * sphere.y * 2. # Bxy
4 changes: 2 additions & 2 deletions dipy/reconst/fwdti.py
@@ -295,7 +295,7 @@ def wls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3,
SA = SI - FS*S0*SFW.T
# SA < 0 means that the signal component from the free water
# compartment is larger than the total fiber signal. These cases are present
-# for inapropriate large volume fractions (given the current S0
+# for inappropriately large volume fractions (given the current S0
# value estimated). To overcome this issue negative SA are replaced
# by data's min positive signal.
SA[SA <= 0] = min_signal
@@ -471,7 +471,7 @@ def _nls_err_func(tensor_elements, design_matrix, data, Diso=3e-3,
w = 1/(sigma**2)

elif weighting == 'gmm':
-# We use the Geman McClure M-estimator to compute the weights on the
+# We use the Geman-McClure M-estimator to compute the weights on the
# residuals:
C = 1.4826 * np.median(np.abs(residuals - np.median(residuals)))
with warnings.catch_warnings():
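As background for the 'gmm' branch above: C is a robust scale estimate (1.4826 times the median absolute deviation), and one common form of the Geman-McClure weight built from it is sketched below. Illustrative only; the exact weighting applied by _nls_err_func is not shown in this hunk:

import numpy as np

def geman_mcclure_weights(residuals):
    # Downweight large residuals; weights fall off as 1/r**4 for
    # residuals r much larger than the robust scale C.
    residuals = np.asarray(residuals, dtype=np.float64)
    C = 1.4826 * np.median(np.abs(residuals - np.median(residuals)))
    return 1.0 / (1.0 + (residuals / C) ** 2) ** 2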
6 changes: 3 additions & 3 deletions dipy/reconst/ivim.py
@@ -319,12 +319,12 @@ def fit(self, data):
-------
IvimFit object
"""
-# Get S0_prime and D - paramters assuming a single exponential decay
+# Get S0_prime and D - parameters assuming a single exponential decay
# for signals for bvals greater than `split_b_D`
S0_prime, D = self.estimate_linear_fit(
data, self.split_b_D, less_than=False)

-# Get S0 and D_star_prime - paramters assuming a single exponential
+# Get S0 and D_star_prime - parameters assuming a single exponential
# decay for signals for bvals greater than `split_b_S0`.

S0, D_star_prime = self.estimate_linear_fit(data, self.split_b_S0,
@@ -739,7 +739,7 @@ def ivim_mix_cost_one(self, phi, signal):
(2016).
"""
-# moore-penrose
+# Moore-Penrose
phi_mp = np.dot(np.linalg.inv(np.dot(phi.T, phi)), phi.T)
f = np.dot(phi_mp, signal)
yhat = np.dot(phi, f) # - sigma
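The normal-equations pseudoinverse in ivim_mix_cost_one coincides with the SVD-based Moore-Penrose pseudoinverse whenever phi has full column rank. A small sketch of the equivalence, with a random placeholder matrix:

import numpy as np

phi = np.random.rand(10, 2)  # placeholder design matrix, full column rank
phi_mp = np.dot(np.linalg.inv(np.dot(phi.T, phi)), phi.T)  # as in the snippet
assert np.allclose(phi_mp, np.linalg.pinv(phi))  # pinv uses the SVD instead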
2 changes: 1 addition & 1 deletion dipy/reconst/mcsd.py
@@ -165,7 +165,7 @@ def __init__(self, gtab, response, reg_sphere=default_sphere, iso=2):
References
----------
.. [1] Jeurissen, B., et al. NeuroImage 2014. Multi-tissue constrained
-spherical deconvolution for improved analysisof multi-shell
+spherical deconvolution for improved analysis of multi-shell
diffusion MRI data
.. [2] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
the fibre orientation distribution in diffusion MRI:
