diff --git a/dipy/align/streamlinear.py b/dipy/align/streamlinear.py index 4fffcd703a..7fb169c86a 100644 --- a/dipy/align/streamlinear.py +++ b/dipy/align/streamlinear.py @@ -1001,7 +1001,7 @@ def check_range(streamline, gt=greater_than, lt=less_than): # SLR on QuickBundles centroids and some thresholding see # Garyfallidis et al. Recognition of white matter # bundles using local and global streamline-based registration and -# clustering, Neuroimage, 2017. +# clustering, NeuroImage, 2017. whole_brain_slr = slr_with_qbx diff --git a/dipy/align/tests/test_imaffine.py b/dipy/align/tests/test_imaffine.py index 336b9ec008..a91a2368a3 100644 --- a/dipy/align/tests/test_imaffine.py +++ b/dipy/align/tests/test_imaffine.py @@ -321,7 +321,7 @@ def test_mi_gradient(): # Compute the gradient with the implementation under test actual = mi_metric.gradient(theta) - # Compute the gradient using finite-diferences + # Compute the gradient using finite-differences n = transform.get_number_of_parameters() expected = np.empty(n, dtype=np.float64) diff --git a/dipy/align/tests/test_imwarp.py b/dipy/align/tests/test_imwarp.py index 1580e175f2..7ec7292d0b 100644 --- a/dipy/align/tests/test_imwarp.py +++ b/dipy/align/tests/test_imwarp.py @@ -107,7 +107,7 @@ def test_diffeomorphic_map_2d(): # Now test the nearest neighbor interpolation warped = diff_map.transform(moving_image, 'nearest') - # compare the images (now we dont have to worry about precision, + # compare the images (now we don't have to worry about precision, # it is n.n.) assert_array_almost_equal(warped, expected) @@ -324,7 +324,7 @@ def test_optimizer_exceptions(): # Verify the default iterations list assert_array_equal(optimizer.level_iters, [100, 100, 25]) - # Verify exception thrown when attepting to fit the energy profile without + # Verify exception thrown when attempting to fit the energy profile without # enough data assert_raises(ValueError, optimizer._get_energy_derivative) diff --git a/dipy/align/tests/test_sumsqdiff.py b/dipy/align/tests/test_sumsqdiff.py index 597caabae5..6ee9b073b8 100644 --- a/dipy/align/tests/test_sumsqdiff.py +++ b/dipy/align/tests/test_sumsqdiff.py @@ -27,7 +27,7 @@ def iterate_residual_field_ssd_2d(delta_field, sigmasq_field, grad, target, for r in range(nrows): for c in range(ncols): sigmasq = sigmasq_field[r, c] if sigmasq_field is not None else 1 - # This has to be done inside the neste loops because + # This has to be done inside the nested loops because # some d[...] 
may have been previously modified nn = 0 y[:] = 0 @@ -212,12 +212,12 @@ def test_compute_residual_displacement_field_ssd_2d(): else: expected = target.copy().astype(np.float64) - # Expected residuals when sigma != infinte + # Expected residuals when sigma != infinity expected[inf_sigma == 0, 0] -= grad_G[inf_sigma == 0, 0] * \ dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, 0] expected[inf_sigma == 0, 1] -= grad_G[inf_sigma == 0, 1] * \ dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, 1] - # Expected residuals when sigma == infinte + # Expected residuals when sigma == infinity expected[inf_sigma == 1] = -1.0 * s[inf_sigma == 1] # Test residual field computation starting with residual = None @@ -355,11 +355,11 @@ def test_compute_residual_displacement_field_ssd_3d(): else: expected = target.copy().astype(np.float64) - # Expected residuals when sigma != infinte + # Expected residuals when sigma != infinity for i in range(3): expected[inf_sigma == 0, i] -= grad_G[inf_sigma == 0, i] * \ dp[inf_sigma == 0] + sigma_field[inf_sigma == 0] * s[inf_sigma == 0, i] - # Expected residuals when sigma == infinte + # Expected residuals when sigma == infinity expected[inf_sigma == 1] = -1.0 * s[inf_sigma == 1] # Test residual field computation starting with residual = None diff --git a/dipy/align/tests/test_transforms.py b/dipy/align/tests/test_transforms.py index d2354ba0db..06d4f707f8 100644 --- a/dipy/align/tests/test_transforms.py +++ b/dipy/align/tests/test_transforms.py @@ -229,9 +229,9 @@ def test_invalid_transform(): # Note: users should not attempt to use the base class Transform: # they should get an instance of one of its derived classes from the # regtransforms dictionary (the base class is not contained there) - # If for some reason the user instanciates it and attempts to use it, + # If for some reason the user instantiates it and attempts to use it, # however, it will raise exceptions when attempting to retrieve its - # jacobian, identity parameters or its matrix representation. It will + # Jacobian, identity parameters or its matrix representation. It will # return -1 if queried about its dimension or number of parameters transform = Transform() theta = np.ndarray(3) diff --git a/dipy/core/tests/test_geometry.py b/dipy/core/tests/test_geometry.py index 49fe02e690..461bbb4f83 100644 --- a/dipy/core/tests/test_geometry.py +++ b/dipy/core/tests/test_geometry.py @@ -270,7 +270,7 @@ def test_perpendicular_directions(): for vector_v in vectors_v: pd = perpendicular_directions(vector_v, num=num, half=False) - # see if length of pd is equal to the number of intendend samples + # see if length of pd is equal to the number of intended samples assert_equal(num, len(pd)) # check if all directions are perpendicular to vector v diff --git a/dipy/core/tests/test_gradients.py b/dipy/core/tests/test_gradients.py index a1b75d15fc..3121a640b3 100644 --- a/dipy/core/tests/test_gradients.py +++ b/dipy/core/tests/test_gradients.py @@ -152,7 +152,7 @@ def test_gradient_table_from_bvals_bvecs(): gt = gradient_table_from_bvals_bvecs(bvals, new_bvecs, b0_threshold=0) npt.assert_array_equal(gt.bvecs, bvecs) - # Bvalue > 0 for non-unit vector + # b-value > 0 for non-unit vector bad_bvals = [2, 1, 2, 3, 4, 5, 6, 0] npt.assert_raises(ValueError, gradient_table_from_bvals_bvecs, bad_bvals, bvecs, b0_threshold=0.) @@ -326,7 +326,7 @@ def test_nan_bvecs(): def test_generate_bvecs(): """Tests whether we have properly generated bvecs. 
""" - # Test if the generated bvectors are unit vectors + # Test if the generated b-vectors are unit vectors bvecs = generate_bvecs(100) norm = [np.linalg.norm(v) for v in bvecs] npt.assert_almost_equal(norm, np.ones(100)) @@ -351,7 +351,7 @@ def test_round_bvals(): b = round_bvals(bvals, bmag=0) npt.assert_array_almost_equal(bvals, b) - # Case that b-valuea are in ms/um2 + # Case that b-value are in ms/um2 bvals = np.array([0.995, 0.995, 0.995, 0.995, 2.005, 2.005, 2.005, 2.005, 0]) b = round_bvals(bvals) diff --git a/dipy/denoise/noise_estimate.py b/dipy/denoise/noise_estimate.py index afcfc789ce..121cf18db9 100644 --- a/dipy/denoise/noise_estimate.py +++ b/dipy/denoise/noise_estimate.py @@ -57,7 +57,7 @@ def piesno(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5, reached if two subsequent estimates are smaller than eps. return_mask : bool - If True, return a mask identyfing all the pure noise voxel + If True, return a mask identifying all the pure noise voxel that were found. Returns @@ -171,7 +171,7 @@ def _piesno_3D(data, N, alpha=0.01, l=100, itermax=100, eps=1e-5, Default: 1e-5. return_mask : bool (optional) - If True, return a mask identyfing all the pure noise voxel + If True, return a mask identifying all the pure noise voxel that were found. Default: False. initial_estimation : float (optional) diff --git a/dipy/denoise/shift_twist_convolution.pyx b/dipy/denoise/shift_twist_convolution.pyx index 3debe71dfb..c5dd3c47e5 100644 --- a/dipy/denoise/shift_twist_convolution.pyx +++ b/dipy/denoise/shift_twist_convolution.pyx @@ -179,7 +179,7 @@ cdef double [:, :, :, ::1] perform_convolution (double [:, :, :, ::1] odfs, with nogil: - # loop over ODFs cx,cy,cz,corient --> y and v + # loop over ODFs cx,cy,cz,orient --> y and v for corient in prange(OR1, schedule='guided'): for cx in range(nx): for cy in range(ny): diff --git a/dipy/denoise/tests/test_lpca.py b/dipy/denoise/tests/test_lpca.py index 8295b914de..50236bec1c 100644 --- a/dipy/denoise/tests/test_lpca.py +++ b/dipy/denoise/tests/test_lpca.py @@ -27,7 +27,7 @@ def rfiw_phantom(gtab, snr=None): slice_ind[3, 4:7, :] = 8 slice_ind[3, 7, :] = 9 - # Define tisse diffusion parameters + # Define tissue diffusion parameters # Restricted diffusion ADr = 0.99e-3 RDr = 0.0 @@ -54,7 +54,7 @@ def rfiw_phantom(gtab, snr=None): # tissue volume fractions have to be adjusted to the measured f values when # constant S0 are assumed constant. Doing this correction, simulations will # be analogous to simulates that S0 are different for each media. 
(For more - # datails on this contact the phantom designer) + # details on this contact the phantom designer) f1 = f * S1 / S0 mevals = np.array([[ADr, RDr, RDr], [ADh, RDh, RDh], @@ -189,7 +189,7 @@ def test_phantom(): gtab = gen_gtab() DWI_clean = rfiw_phantom(gtab, snr=None) DWI, sigma = rfiw_phantom(gtab, snr=30) - # To test without rician correction + # To test without Rician correction temp = (DWI_clean / sigma)**2 DWI_clean_wrc = (sigma * np.sqrt(np.pi / 2) * np.exp(-0.5 * temp) * ((1 + 0.5 * temp) * sps.iv(0, 0.25 * temp) + 0.5 * temp * diff --git a/dipy/direction/tests/test_pmf.py b/dipy/direction/tests/test_pmf.py index 51a114a4b2..d3f376cf33 100644 --- a/dipy/direction/tests/test_pmf.py +++ b/dipy/direction/tests/test_pmf.py @@ -67,7 +67,7 @@ def test_boot_pmf(): npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0]) npt.assert_array_almost_equal(no_boot_pmf, model_pmf) - # test model sherical harminic order different than bootstrap order + # test model spherical harmonic order different than bootstrap order with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", category=UserWarning) csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6) diff --git a/dipy/reconst/dki.py b/dipy/reconst/dki.py index 66c75ece22..14ddf1d755 100644 --- a/dipy/reconst/dki.py +++ b/dipy/reconst/dki.py @@ -240,7 +240,7 @@ def _F1m(a, b, c): # Only computes F1 in voxels that have all eigenvalues larger than zero cond0 = _positive_evals(a, b, c) - # Apply formula for non problematic plaussible cases, i.e. a!=b and a!=c + # Apply formula for non problematic plausible cases, i.e. a!=b and a!=c cond1 = np.logical_and(cond0, np.logical_and(abs(a - b) >= a * er, abs(a - c) >= a * er)) if np.sum(cond1) != 0: @@ -254,7 +254,7 @@ def _F1m(a, b, c): (3 * L1**2 - L1 * L2 - L1 * L3 - L2 * L3) / (3 * L1 * np.sqrt(L2 * L3)) * RDm - 1) - # Resolve possible sigularity a==b + # Resolve possible singularity a==b cond2 = np.logical_and(cond0, np.logical_and(abs(a - b) < a * er, abs(a - c) > a * er)) if np.sum(cond2) != 0: @@ -262,7 +262,7 @@ def _F1m(a, b, c): L3 = c[cond2] F1[cond2] = _F2m(L3, L1, L1) / 2. - # Resolve possible sigularity a==c + # Resolve possible singularity a==c cond3 = np.logical_and(cond0, np.logical_and(abs(a - c) < a * er, abs(a - b) > a * er)) if np.sum(cond3) != 0: @@ -270,7 +270,7 @@ def _F1m(a, b, c): L2 = b[cond3] F1[cond3] = _F2m(L2, L1, L1) / 2 - # Resolve possible sigularity a==b and a==c + # Resolve possible singularity a==b and a==c cond4 = np.logical_and(cond0, np.logical_and(abs(a - c) < a * er, abs(a - b) < a * er)) if np.sum(cond4) != 0: @@ -328,7 +328,7 @@ def _F2m(a, b, c): # Only computes F2 in voxels that have all eigenvalues larger than zero cond0 = _positive_evals(a, b, c) - # Apply formula for non problematic plaussible cases, i.e. b!=c + # Apply formula for non problematic plausible cases, i.e. b!=c cond1 = np.logical_and(cond0, (abs(b - c) > b * er)) if np.sum(cond1) != 0: L1 = a[cond1] @@ -340,14 +340,14 @@ def _F2m(a, b, c): (((L2 + L3) / (np.sqrt(L2 * L3))) * RF + ((2. * L1 - L2 - L3) / (3. * np.sqrt(L2 * L3))) * RD - 2.) - # Resolve possible sigularity b==c + # Resolve possible singularity b==c cond2 = np.logical_and(cond0, np.logical_and(abs(b - c) < b * er, abs(a - b) > b * er)) if np.sum(cond2) != 0: L1 = a[cond2] L3 = (c[cond2] + b[cond2]) / 2. - # Cumpute alfa [1]_ + # Compute alpha [1]_ x = 1. - (L1 / L3) alpha = np.zeros(len(L1)) for i in range(len(x)): @@ -360,7 +360,7 @@ def _F2m(a, b, c): 6. * ((L1 + 2.
* L3)**2) / (144. * L3**2 * (L1 - L3)**2) * \ (L3 * (L1 + 2. * L3) + L1 * (L1 - 4. * L3) * alpha) - # Resolve possible sigularity a==b and a==c + # Resolve possible singularity a==b and a==c cond3 = np.logical_and(cond0, np.logical_and(abs(b - c) < b * er, abs(a - b) < b * er)) if np.sum(cond3) != 0: @@ -794,7 +794,7 @@ def _G1m(a, b, c): # Only computes G1 in voxels that have all eigenvalues larger than zero cond0 = _positive_evals(a, b, c) - # Apply formula for non problematic plaussible cases, i.e. b!=c + # Apply formula for non problematic plausible cases, i.e. b!=c cond1 = np.logical_and(cond0, (abs(b - c) > er)) if np.sum(cond1) != 0: L1 = a[cond1] @@ -804,7 +804,7 @@ def _G1m(a, b, c): (L1 + L2 + L3)**2 / (18 * L2 * (L2 - L3)**2) * \ (2. * L2 + (L3**2 - 3 * L2 * L3) / np.sqrt(L2 * L3)) - # Resolve possible sigularity b==c + # Resolve possible singularity b==c cond2 = np.logical_and(cond0, abs(b - c) < er) if np.sum(cond2) != 0: L1 = a[cond2] @@ -849,7 +849,7 @@ def _G2m(a, b, c): kurtosis imaging. Magn Reson Med. 65(3), 823-836 """ # Float error used to compare two floats, abs(l1 - l2) < er for l1 = l2 - # Error is defined as five order of magnitude larger than system's epslon + # Error is defined as five orders of magnitude larger than system's epsilon er = np.finfo(a.ravel()[0]).eps * 1e5 # Initialize G2 @@ -858,7 +858,7 @@ def _G2m(a, b, c): # Only computes G2 in voxels that have all eigenvalues larger than zero cond0 = _positive_evals(a, b, c) - # Apply formula for non problematic plaussible cases, i.e. b!=c + # Apply formula for non problematic plausible cases, i.e. b!=c cond1 = np.logical_and(cond0, (abs(b - c) > er)) if np.sum(cond1) != 0: L1 = a[cond1] @@ -868,7 +868,7 @@ def _G2m(a, b, c): (L1 + L2 + L3)**2 / (3 * (L2 - L3)**2) * \ ((L2 + L3) / np.sqrt(L2 * L3) - 2) - # Resolve possible sigularity b==c + # Resolve possible singularity b==c cond2 = np.logical_and(cond0, abs(b - c) < er) if np.sum(cond2) != 0: L1 = a[cond2] @@ -1824,7 +1824,7 @@ def wls_fit_dki(design_matrix, data): tol = 1e-6 - # preparing data and initializing parametres + # preparing data and initializing parameters data = np.asarray(data) data_flat = data.reshape((-1, data.shape[-1])) dki_params = np.empty((len(data_flat), 27)) diff --git a/dipy/reconst/dki_micro.py b/dipy/reconst/dki_micro.py index 8ec7002093..a7d875e1e8 100644 --- a/dipy/reconst/dki_micro.py +++ b/dipy/reconst/dki_micro.py @@ -134,7 +134,7 @@ def diffusion_components(dki_params, sphere='repulsion100', awf=None, edt_all = np.zeros(shape + (6,)) idt_all = np.zeros(shape + (6,)) - # Generate matrix that converts apparant diffusion coefficients to tensors + # Generate matrix that converts apparent diffusion coefficients to tensors B = np.zeros((sphere.x.size, 6)) B[:, 0] = sphere.x * sphere.x # Bxx B[:, 1] = sphere.x * sphere.y * 2. # Bxy diff --git a/dipy/reconst/fwdti.py b/dipy/reconst/fwdti.py index e694fbe454..20067bb44f 100644 --- a/dipy/reconst/fwdti.py +++ b/dipy/reconst/fwdti.py @@ -295,7 +295,7 @@ def wls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3, SA = SI - FS*S0*SFW.T # SA < 0 means that the signal components from the free water # component is larger than the total fiber. This cases are present - # for inapropriate large volume fractions (given the current S0 + # for inappropriately large volume fractions (given the current S0 # value estimated). To overcome this issue negative SA are replaced # by data's min positive signal.
SA[SA <= 0] = min_signal @@ -471,7 +471,7 @@ def _nls_err_func(tensor_elements, design_matrix, data, Diso=3e-3, w = 1/(sigma**2) elif weighting == 'gmm': - # We use the Geman McClure M-estimator to compute the weights on the + # We use the Geman-McClure M-estimator to compute the weights on the # residuals: C = 1.4826 * np.median(np.abs(residuals - np.median(residuals))) with warnings.catch_warnings(): diff --git a/dipy/reconst/ivim.py b/dipy/reconst/ivim.py index dbbdf27769..7388cbc86e 100644 --- a/dipy/reconst/ivim.py +++ b/dipy/reconst/ivim.py @@ -319,12 +319,12 @@ def fit(self, data): ------- IvimFit object """ - # Get S0_prime and D - paramters assuming a single exponential decay + # Get S0_prime and D - parameters assuming a single exponential decay # for signals for bvals greater than `split_b_D` S0_prime, D = self.estimate_linear_fit( data, self.split_b_D, less_than=False) - # Get S0 and D_star_prime - paramters assuming a single exponential + # Get S0 and D_star_prime - parameters assuming a single exponential # decay for for signals for bvals greater than `split_b_S0`. S0, D_star_prime = self.estimate_linear_fit(data, self.split_b_S0, @@ -739,7 +739,7 @@ def ivim_mix_cost_one(self, phi, signal): (2016). """ - # moore-penrose + # Moore-Penrose phi_mp = np.dot(np.linalg.inv(np.dot(phi.T, phi)), phi.T) f = np.dot(phi_mp, signal) yhat = np.dot(phi, f) # - sigma diff --git a/dipy/reconst/mcsd.py b/dipy/reconst/mcsd.py index 46d6044ae5..f104d8d92c 100644 --- a/dipy/reconst/mcsd.py +++ b/dipy/reconst/mcsd.py @@ -165,7 +165,7 @@ def __init__(self, gtab, response, reg_sphere=default_sphere, iso=2): References ---------- .. [1] Jeurissen, B., et al. NeuroImage 2014. Multi-tissue constrained - spherical deconvolution for improved analysisof multi-shell + spherical deconvolution for improved analysis of multi-shell diffusion MRI data .. [2] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the fibre orientation distribution in diffusion MRI: diff --git a/dipy/reconst/tests/test_dki.py b/dipy/reconst/tests/test_dki.py index 17a459088d..60face5490 100644 --- a/dipy/reconst/tests/test_dki.py +++ b/dipy/reconst/tests/test_dki.py @@ -47,10 +47,10 @@ evecs_cross[2], kt_cross), axis=0) # Simulation 2. Spherical kurtosis tensor.- for white matter, this can be a -# biological implaussible scenario, however this simulation is usefull for +# biologically implausible scenario; however, this simulation is useful for # testing the estimation of directional apparent kurtosis and the mean # kurtosis, since its directional and mean kurtosis ground truth are a constant -# which can be easly mathematicaly calculated. +# which can be easily calculated mathematically. Di = 0.00099 De = 0.00226 mevals_sph = np.array([[Di, Di, Di], [De, De, De]]) @@ -69,7 +69,7 @@ # Simulation 3. Multi-voxel simulations - dataset of four voxels is simulated. # Since the objective of this simulation is to see if procedures are able to -# work with multi-dimentional data all voxels contains the same crossing signal +# work with multi-dimensional data, all voxels contain the same crossing signal # produced in simulation 1.
DWI = np.zeros((2, 2, 1, len(gtab_2s.bvals))) @@ -182,7 +182,7 @@ def test_carlson_rf(): y = np.array([[2.0, 1.0], [3.0, 3.0]]) z = np.array([[0.0, 0.0], [4.0, 4.0]]) - # Defene reference outputs + # Define reference outputs RF_ref = np.array([[1.3110287771461, 1.8540746773014], [0.58408284167715, 0.58408284167715]]) @@ -197,7 +197,7 @@ def test_carlson_rf(): y = np.array([-1j, 1j, -1j, 1j]) z = np.array([0.0, 0.0, 2, 1 - 1j]) - # Defene reference outputs + # Define reference outputs RF_ref = np.array([1.8540746773014, 0.79612586584234 - 1.2138566698365j, 1.0441445654064, 0.93912050218619 - 0.53296252018635j]) # Compute integrals @@ -312,7 +312,7 @@ def test_Wrotate_crossing_fibers(): def test_Wcons(): - # Construct the 4D kurtosis tensor manualy from the crossing fiber kt + # Construct the 4D kurtosis tensor manually from the crossing fiber kt # simulate Wfit = np.zeros([3, 3, 3, 3]) @@ -528,10 +528,10 @@ def test_MK_singularities(): # test singularity L1 == L3 and L1 != L2 # since L1 is defined as the larger eigenvalue and L3 the smallest - # eigenvalue, this singularity teoretically will never be called, + # eigenvalue, this singularity theoretically will never be called, # because for L1 == L3, L2 have also to be = L1 and L2. # Nevertheless, I decided to include this test since this singularity - # is revelant for cases that eigenvalues are not ordered + # is relevant for cases where eigenvalues are not ordered # artificially revert the eigenvalue and eigenvector order dki_params = dkiF.model_params.copy() @@ -584,7 +584,7 @@ def test_dki_errors(): def test_kurtosis_maximum(): # TEST 1 - # simulate a crossing fibers interserting at 70 degrees. The first fiber + # simulate two crossing fibers intersecting at 70 degrees. The first fiber # is aligned to the x-axis while the second fiber is aligned to the x-z # plane with an angular deviation of 70 degrees from the first one. # According to Neto Henriques et al, 2015 (NeuroImage 111: 85-99), the diff --git a/dipy/reconst/tests/test_dki_micro.py b/dipy/reconst/tests/test_dki_micro.py index bdb08ec60f..64173f36ed 100644 --- a/dipy/reconst/tests/test_dki_micro.py +++ b/dipy/reconst/tests/test_dki_micro.py @@ -43,14 +43,14 @@ # approximation components larger than the fourth order. Thus parameter # estimates are only equal to the ground truth values of the simulation # if signals taylor components larger than the fourth order are removed. -# Signal whithout this taylor components can be generated using the +# Signal without these Taylor components can be generated using the # multi_tensor_dki simulations. Therefore we used this function to test the # expected estimates of the model. DWIsim_all_taylor = np.zeros((2, 2, 2, gtab_2s.bvals.size)) # Signal with all taylor components can be simulated using the function -# multi_tensor. Generating this signals will be usefull to test the prediction +# multi_tensor. Generating these signals will be useful to test the prediction # procedures of DKI-based microstructural model.
diff --git a/dipy/reconst/tests/test_dti.py b/dipy/reconst/tests/test_dti.py index a6cda62180..6829dda73e 100644 --- a/dipy/reconst/tests/test_dti.py +++ b/dipy/reconst/tests/test_dti.py @@ -345,7 +345,7 @@ def test_wls_and_ls_fit(): npt.assert_almost_equal(Y[0], b0) Y.shape = (-1,) + Y.shape - # Testing WLS Fit on Single Voxel + # Testing WLS Fit on single voxel # If you do something wonky (passing min_signal<0), you should get an # error: npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS', diff --git a/dipy/reconst/tests/test_fwdti.py b/dipy/reconst/tests/test_fwdti.py index f39ee57571..558a7b5758 100644 --- a/dipy/reconst/tests/test_fwdti.py +++ b/dipy/reconst/tests/test_fwdti.py @@ -142,7 +142,7 @@ def test_fwdti_multi_voxel(): assert_almost_equal(Ffwe, GTF) assert_almost_equal(MDfwe, MDref) - # Test cholesky + # Test Cholesky fwdm = fwdti.FreeWaterTensorModel(gtab_2s, 'NLS', cholesky=True) fwefit = fwdm.fit(DWI) FAfwe = fwefit.fa diff --git a/dipy/reconst/tests/test_msdki.py b/dipy/reconst/tests/test_msdki.py index 07b9232cd7..c2eec17817 100644 --- a/dipy/reconst/tests/test_msdki.py +++ b/dipy/reconst/tests/test_msdki.py @@ -23,7 +23,7 @@ gtab_3s = gradient_table(bvals_3s, bvecs_3s) # Simulation 1. Spherical kurtosis tensor - MSK and MSD from the MSDKI model -# should be equa to the MK and MD of the DKI tensor for cases of +# should be equal to the MK and MD of the DKI tensor for cases of # spherical kurtosis tensors Di = 0.00099 De = 0.00226 @@ -124,7 +124,7 @@ def test_errors(): # fourth error raises if an given index point to more dimensions that data # does not contain - # define auxiliar function for the assert raises + # define auxiliary function for the assert raises def aux_test_fun(ob, ind): met = ob[ind].msk return met diff --git a/dipy/reconst/tests/test_qtdmri.py b/dipy/reconst/tests/test_qtdmri.py index 6f29f709cb..dfce6a58fe 100644 --- a/dipy/reconst/tests/test_qtdmri.py +++ b/dipy/reconst/tests/test_qtdmri.py @@ -158,7 +158,7 @@ def test_normalization_time(): def test_anisotropic_isotropic_equivalence(radial_order=4, time_order=2): - # generate qt-scheme and arbitary synthetic crossing data. + # generate qt-scheme and arbitrary synthetic crossing data. gtab_4d = generate_gtab4D() l1, l2, l3 = [0.0015, 0.0003, 0.0003] S = generate_signal_crossing(gtab_4d, l1, l2, l3) @@ -189,7 +189,7 @@ def test_anisotropic_isotropic_equivalence(radial_order=4, time_order=2): assert_array_almost_equal(pdf_aniso / pdf_aniso.max(), pdf_iso / pdf_aniso.max()) - # same norm of the laplacian + # same norm of the Laplacian norm_laplacian_aniso = qtdmri_fit_cart.norm_of_laplacian_signal() norm_laplacian_iso = qtdmri_fit_sphere.norm_of_laplacian_signal() assert_almost_equal(norm_laplacian_aniso / norm_laplacian_aniso, @@ -402,7 +402,7 @@ def test_q0_constraint_and_unity_of_ODFs(radial_order=6, time_order=2): assert_almost_equal(float(E_q0_first_tau), 1.) assert_almost_equal(float(E_q0_last_tau), 1.) 
- # test if maginal ODF integral in sh is equal to one + # test if marginal ODF integral in sh is equal to one # Integral of Y00 spherical harmonic is 1 / (2 * np.sqrt(np.pi)) # division with this results in normalization odf_sh = qtdmri_fit_lap.odf_sh(s=0, tau=tau.max()) diff --git a/dipy/reconst/tests/test_reco_utils.py b/dipy/reconst/tests/test_reco_utils.py index 776882e552..a38c2e63b4 100644 --- a/dipy/reconst/tests/test_reco_utils.py +++ b/dipy/reconst/tests/test_reco_utils.py @@ -27,7 +27,7 @@ def test_argmax_from_countarrs(): vertinds = np.arange(10, dtype=np.uint32) adj_counts = np.ones((10,), dtype=np.uint32) adj_inds_raw = np.arange(10, dtype=np.uint32)[::-1] - # when contigous - OK + # when contiguous - OK adj_inds = adj_inds_raw.copy() argmax_from_countarrs(vals, vertinds, adj_counts, adj_inds) # yield assert_array_equal(inds, [5, 6, 7, 8, 9]) diff --git a/dipy/segment/clustering.py b/dipy/segment/clustering.py index c27e4ee5dc..adedf1ac6e 100644 --- a/dipy/segment/clustering.py +++ b/dipy/segment/clustering.py @@ -443,7 +443,7 @@ class QuickBundles(Clustering): >>> from nibabel import trackvis as tv >>> streams, hdr = tv.read(get_fnames('fornix')) >>> streamlines = [i[0] for i in streams] - >>> # Segment fornix with a treshold of 10mm and streamlines resampled + >>> # Segment fornix with a threshold of 10mm and streamlines resampled >>> # to 12 points. >>> qb = QuickBundles(threshold=10.) >>> clusters = qb.cluster(streamlines) diff --git a/dipy/segment/clustering_algorithms.pyx b/dipy/segment/clustering_algorithms.pyx index a742d6018a..e1f943af54 100644 --- a/dipy/segment/clustering_algorithms.pyx +++ b/dipy/segment/clustering_algorithms.pyx @@ -112,7 +112,7 @@ def quickbundles(streamlines, Metric metric, double threshold, if not streamline.flags.writeable or streamline.dtype != DTYPE: streamline = streamline.astype(DTYPE) cluster_id = qb.assignment_step(streamline, idx) - # The update step is performed right after the assignement step instead + # The update step is performed right after the assignment step instead # of after all streamlines have been assigned like k-means algorithm. qb.update_step(cluster_id) diff --git a/dipy/segment/tests/test_quickbundles.py b/dipy/segment/tests/test_quickbundles.py index 54448abdad..852cc434c4 100644 --- a/dipy/segment/tests/test_quickbundles.py +++ b/dipy/segment/tests/test_quickbundles.py @@ -86,7 +86,7 @@ def test_quickbundles_2D(): # plt.plot(*zip(*data[10:, 0]), linestyle='None', marker='*') # plt.show() - # Theorically using a threshold above the following value will not + # Theoretically, using a threshold above the following value will not # produce expected results. 
threshold = np.sqrt(2*(10**2))-np.sqrt(2) metric = dipymetric.SumPointwiseEuclideanMetric() diff --git a/dipy/sims/tests/test_voxel.py b/dipy/sims/tests/test_voxel.py index fd2b3baa4b..0854d79764 100644 --- a/dipy/sims/tests/test_voxel.py +++ b/dipy/sims/tests/test_voxel.py @@ -27,7 +27,7 @@ def setup_module(): gtab_2s = gradient_table(bvals_2s, bvecs_2s) -# Unused with missing refernces to basis +# Unused with missing references to basis # def diff2eigenvectors(dx, dy, dz): # """ numerical derivatives 2 eigenvectors # """ @@ -214,7 +214,7 @@ def test_kurtosis_elements(): key = (i+1) * (j+1) * (k+1) * (l+1) assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l), kt_ref[key]) - # Testing optional funtion inputs + # Testing optional function inputs assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l), kurtosis_element(mD, frac, i, k, j, l, D, MD)) diff --git a/dipy/stats/analysis.py b/dipy/stats/analysis.py index 019f7152cb..412fcf581c 100644 --- a/dipy/stats/analysis.py +++ b/dipy/stats/analysis.py @@ -310,7 +310,7 @@ def gaussian_weights(bundle, n_points=100, return_mahalnobis=False, # This is a 3-by-3 array: node_coords = bundle.data[node::n_points] c = np.cov(node_coords.T, ddof=0) - # Reorganize as an upper diagonal matrix for expected Mahalnobis input: + # Reorganize as an upper triangular matrix for expected Mahalanobis input: c = np.array([[c[0, 0], c[0, 1], c[0, 2]], [0, c[1, 1], c[1, 2]], [0, 0, c[2, 2]]]) @@ -321,7 +321,7 @@ def gaussian_weights(bundle, n_points=100, return_mahalnobis=False, for fn in range(len(bundle)): # In the special case where all the streamlines have the exact same # coordinate in this node, the covariance matrix is all zeros, so - # we can't calculate the Mahalnobis distance, we will instead give + # we can't calculate the Mahalanobis distance; we will instead give # each streamline an identical weight, equal to the number of # streamlines: if np.allclose(c, 0): diff --git a/dipy/stats/tests/test_analysis.py b/dipy/stats/tests/test_analysis.py index 6c604f8a5d..1597f6918b 100644 --- a/dipy/stats/tests/test_analysis.py +++ b/dipy/stats/tests/test_analysis.py @@ -77,7 +77,7 @@ def test_gaussian_weights(): w = gaussian_weights(bundle, n_points=10) npt.assert_almost_equal(w, np.ones((len(bundle), 10)) * 0.5) - # Test when asked to return Mahalnobis, instead of weights + # Test when asked to return Mahalanobis distances, instead of weights w = gaussian_weights(bundle, n_points=10, return_mahalnobis=True) npt.assert_almost_equal(w, np.ones((len(bundle), 10))) diff --git a/dipy/tracking/local/localtrack.pyx b/dipy/tracking/local/localtrack.pyx index 5c98514ba5..365ad4d49b 100644 --- a/dipy/tracking/local/localtrack.pyx +++ b/dipy/tracking/local/localtrack.pyx @@ -455,7 +455,7 @@ cdef _pft(np.float_t[:, :] streamline, # Resample the particles if the weights are too uneven. # Particles with negligible weights are replaced by duplicates of - # those with high weigths through resampling + # those with high weights through resampling N_effective = 1.
/ sum_squared if N_effective < particle_count / 10.: # copy data in the temp arrays diff --git a/dipy/tracking/local/tests/test_tissue_classifier.py b/dipy/tracking/local/tests/test_tissue_classifier.py index 14400fa71e..97715ae6b8 100644 --- a/dipy/tracking/local/tests/test_tissue_classifier.py +++ b/dipy/tracking/local/tests/test_tissue_classifier.py @@ -173,7 +173,7 @@ def test_cmc_tissue_classifier(): step_size=1, average_voxel_size=1) - # Test contructors + # Test constructors for idx in np.ndindex(wm.shape): idx = np.asarray(idx, dtype="float64") npt.assert_almost_equal(cmc_tc.get_include(idx), diff --git a/dipy/tracking/local/tests/test_tracking.py b/dipy/tracking/local/tests/test_tracking.py index 5b2a9dd5b8..b29a6e0b13 100644 --- a/dipy/tracking/local/tests/test_tracking.py +++ b/dipy/tracking/local/tests/test_tracking.py @@ -769,7 +769,7 @@ def test_affine_transformations(): # TST - combined affines a5 = a1 + a2 + a3 a5[3, 3] = 1 - # TST - in vivo affine exemple + # TST - in vivo affine example # Sometimes data have affines with tiny shear components. # For example, the small_101D data-set has some of that: fdata, _, _ = get_fnames('small_101D') @@ -780,7 +780,7 @@ def test_affine_transformations(): offset = affine[:3, 3] seeds_trans = [np.dot(lin, s) + offset for s in seeds] - # We compute the voxel size to ajust the step size to one voxel + # We compute the voxel size to adjust the step size to one voxel voxel_size = np.mean(np.sqrt(np.dot(lin, lin).diagonal())) streamlines = LocalTracking(direction_getter=dg, diff --git a/dipy/tracking/propspeed.pyx b/dipy/tracking/propspeed.pyx index 9eb9e74b44..da0bc50b9d 100644 --- a/dipy/tracking/propspeed.pyx +++ b/dipy/tracking/propspeed.pyx @@ -184,7 +184,7 @@ cdef void _trilinear_interpolation_iso(double *X, d[i] = X[i] - Xf[i] nd[i] = 1 - d[i] # weights - # the weights are actualy the volumes of the 8 smaller boxes that define + # the weights are actually the volumes of the 8 smaller boxes that define # the initial rectangular box for more on trilinear have a look here # http://en.wikipedia.org/wiki/Trilinear_interpolation # http://local.wasp.uwa.edu.au/~pbourke/miscellaneous/interpolation/index.html @@ -385,7 +385,7 @@ cdef cnp.npy_intp _initial_direction(double* seed,double *qa, for i from 0 <= i < 3: point[i] = floor(seed[i] + .5) point[3] = ref - # Find the offcet in memory to access the qa value + # Find the offset in memory to access the qa value off = offset(point,strides, 4, 8) qa_tmp = qa[off] # Check for scalar threshold diff --git a/dipy/tracking/tests/test_streamline.py b/dipy/tracking/tests/test_streamline.py index e1a5ac1473..8b722b8df0 100644 --- a/dipy/tracking/tests/test_streamline.py +++ b/dipy/tracking/tests/test_streamline.py @@ -556,7 +556,7 @@ def test_deform_streamlines(): # Put orig_streamlines_world into voxmm orig_streamlines = transform_streamlines(orig_streamlines_world, np.linalg.inv(stream2world)) - # All close because of floating pt inprecision + # All close because of floating pt imprecision for o, s in zip(orig_streamlines, streamlines): assert_allclose(s, o, rtol=1e-10, atol=0) @@ -644,7 +644,7 @@ def dist_to_line(prev, next, curr): def test_compress_streamlines(): for compress_func in [compress_streamlines_python, compress_streamlines]: - # Small streamlines (less than two points) are uncompressable. + # Small streamlines (less than two points) are incompressible. 
for small_streamline in [np.array([[]]), np.array([[1, 1, 1]]), np.array([[1, 1, 1], [2, 2, 2]])]: @@ -682,7 +682,7 @@ def test_compress_streamlines(): # (like the C++ version) compress_func(streamline, max_segment_length=np.inf) - # Uncompressable streamline when `tol_error` == 1. + # Incompressible streamline when `tol_error` == 1. simple_streamline = np.array([[0, 0, 0], [1, 1, 0], [1.5, np.inf, 0], @@ -696,7 +696,7 @@ def test_compress_streamlines(): assert_array_equal(c_streamline, simple_streamline) # Create a special streamline where every other point is increasingly - # farther from a straigth line formed by the streamline endpoints. + # farther from a straight line formed by the streamline endpoints. tol_errors = np.linspace(0, 10, 21) orthogonal_line = np.array([[-np.sqrt(2)/2, np.sqrt(2)/2, 0]], dtype=np.float32) diff --git a/dipy/tracking/utils.py b/dipy/tracking/utils.py index 9bde258ffc..b6924030f4 100644 --- a/dipy/tracking/utils.py +++ b/dipy/tracking/utils.py @@ -741,7 +741,7 @@ def near_roi(streamlines, region_of_interest, affine=None, tol=None, roi_coords = np.array(np.where(region_of_interest)).T x_roi_coords = apply_affine(affine, roi_coords) - # If it's already a list, we can save time by preallocating the output + # If it's already a list, we can save time by pre-allocating the output if isinstance(streamlines, list): out = np.zeros(len(streamlines), dtype=bool) for ii, sl in enumerate(streamlines): diff --git a/dipy/tracking/vox2track.pyx b/dipy/tracking/vox2track.pyx index ddb2207da5..09d1590a5b 100644 --- a/dipy/tracking/vox2track.pyx +++ b/dipy/tracking/vox2track.pyx @@ -144,7 +144,7 @@ def streamline_mapping(streamlines, voxel_size=None, affine=None, voxel_indices[j, 2]) uniq_points.add(point) - # Add the index of this streamline for each uniq voxel + # Add the index of this streamline for each unique voxel for point in uniq_points: if point in mapping: mapping[point].append(i) diff --git a/dipy/viz/projections.py b/dipy/viz/projections.py index 6c087c255c..41b16f4966 100644 --- a/dipy/viz/projections.py +++ b/dipy/viz/projections.py @@ -82,7 +82,7 @@ def sph_project(vertices, val, ax=None, vmin=None, vmax=None, cmap=None, # positive: neg_idx = np.where(verts_rot[0] > 0) - # rotate the entire bvector around to point in the other direction: + # rotate the entire b-vector around to point in the other direction: verts_rot[:, neg_idx] *= -1 _, theta, phi = geo.cart2sphere(verts_rot[0], verts_rot[1], verts_rot[2]) @@ -109,7 +109,7 @@ def sph_project(vertices, val, ax=None, vmin=None, vmax=None, cmap=None, r = (val - my_min) / float(my_max - my_min) - # Enforce the maximum and minumum boundaries, if there are values + # Enforce the maximum and minimum boundaries, if there are values # outside those boundaries: r[r < 0] = 0 r[r > 1] = 1 diff --git a/dipy/viz/tests/test_regtools.py b/dipy/viz/tests/test_regtools.py index ddfd07bc50..e81e2fcb8b 100644 --- a/dipy/viz/tests/test_regtools.py +++ b/dipy/viz/tests/test_regtools.py @@ -26,7 +26,7 @@ def test_plot_2d_diffeomorphic_map(): mapping = sdr.optimize(static, moving) # Smoke testing of plots ff = regtools.plot_2d_diffeomorphic_map(mapping, 10) - # Defualt shape is static shape, moving shape + # Default shape is static shape, moving shape npt.assert_equal(ff[0].shape, st_shape) npt.assert_equal(ff[1].shape, mv_shape) # Can specify shape diff --git a/dipy/workflows/align.py b/dipy/workflows/align.py index c29cf7c08d..cbfbfbba58 100644 --- a/dipy/workflows/align.py +++ b/dipy/workflows/align.py @@ -178,7 +178,7 @@ def
run(self, static_files, moving_files, .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter bundles using local and global streamline-based registration - and clustering, Neuroimage, 2017. + and clustering, NeuroImage, 2017. """ io_it = self.get_io_iterator() @@ -855,7 +855,7 @@ def run(self, static_image_files, moving_image_files, prealign_file='', raise ValueError("Invalid similarity metric: Please" " provide a valid metric like 'ssd', 'cc', 'em'") - logging.info("Starting Diffeormorphic Registration") + logging.info("Starting Diffeomorphic Registration") logging.info('Using {0} Metric'.format(metric.upper())) # Init parameter if they are not setup @@ -928,5 +928,5 @@ def run(self, static_image_files, moving_image_files, prealign_file='', # Saving logging.info('Saving warped {0}'.format(owarped_file)) save_nifti(owarped_file, warped_moving, static_grid2world) - logging.info('Saving Diffeormorphic map {0}'.format(omap_file)) + logging.info('Saving Diffeomorphic map {0}'.format(omap_file)) save_nifti(omap_file, mapping_data, mapping.codomain_world2grid) diff --git a/dipy/workflows/denoise.py b/dipy/workflows/denoise.py index c6af05cf25..5f2ad6c4bf 100644 --- a/dipy/workflows/denoise.py +++ b/dipy/workflows/denoise.py @@ -33,7 +33,7 @@ def run(self, input_files, sigma=0, out_dir='', out_dir : string, optional Output directory (default input file directory) out_denoised : string, optional - Name of the resuting denoised volume (default: dwi_nlmeans.nii.gz) + Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz) """ io_it = self.get_io_iterator() for fpath, odenoised in io_it: diff --git a/dipy/workflows/flow_runner.py b/dipy/workflows/flow_runner.py index 52e08511a6..599e82e3ec 100644 --- a/dipy/workflows/flow_runner.py +++ b/dipy/workflows/flow_runner.py @@ -12,7 +12,7 @@ def get_level(lvl): - """ Transforms the loggin level passed on the commandline into a proper + """ Transforms the logging level passed on the commandline into a proper logging level name. """ try: @@ -47,7 +47,7 @@ def run_flow(flow): help='Prepend mixed input names to output names.') # Add logging parameters common to all workflows - msg = 'Log messsages display level. Accepted options include CRITICAL,' + msg = 'Log messages display level. Accepted options include CRITICAL,' msg += ' ERROR, WARNING, INFO, DEBUG and NOTSET (default INFO).' parser.add_argument('--log_level', action='store', dest='log_level', metavar='string', required=False, default='INFO', diff --git a/dipy/workflows/io.py b/dipy/workflows/io.py index 822104efc8..ed8a546ca4 100644 --- a/dipy/workflows/io.py +++ b/dipy/workflows/io.py @@ -71,8 +71,8 @@ def run(self, input_files, if os.path.basename(input_path).lower().find('bval') > -1: bvals = np.loadtxt(input_path) - logging.info('Bvalues \n{0}'.format(bvals)) - logging.info('Total number of bvalues {}'.format(len(bvals))) + logging.info('b-values \n{0}'.format(bvals)) + logging.info('Total number of b-values {}'.format(len(bvals))) shells = np.sum(np.diff(np.sort(bvals)) > bshell_thr) logging.info('Number of gradient shells {0}'.format(shells)) logging.info('Number of b0s {0} (b0_thr {1})\n' diff --git a/dipy/workflows/reconst.py b/dipy/workflows/reconst.py index ee544eff70..2655007100 100644 --- a/dipy/workflows/reconst.py +++ b/dipy/workflows/reconst.py @@ -50,7 +50,7 @@ def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta, files to an output directory specified by `out_dir`. 
In order for the MAPMRI workflow to work in the way - intended either the laplacian or positivity or both must + intended either the Laplacian or positivity or both must be set to True. Parameters ---------- @@ -86,7 +86,7 @@ def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta, ng, perng, parng (default: [] (all)) laplacian_weighting : float, optional - Weighting value used in fitting the MAPMRI model in the laplacian + Weighting value used in fitting the MAPMRI model in the Laplacian and both model types. (default: 0.05) radial_order : unsigned int, optional Even value used to set the order of the basis @@ -96,7 +96,7 @@ def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta, out_rtop : string, optional Name of the rtop to be saved out_lapnorm : string, optional - Name of the norm of laplacian signal to be saved + Name of the norm of Laplacian signal to be saved out_msd : string, optional Name of the msd to be saved out_qiv : string, optional @@ -430,7 +430,7 @@ def run(self, input_files, bvalues_files, bvectors_files, mask_files, frf : variable float, optional Fiber response function can be for example inputed as 15 4 4 (from the command line) or [15, 4, 4] from a Python script to be - converted to float and mutiplied by 10**-4 . If None + converted to float and multiplied by 10**-4 . If None the fiber response function will be computed automatically (default: None). extract_pam_values : bool, optional diff --git a/dipy/workflows/tests/test_tracking.py b/dipy/workflows/tests/test_tracking.py index 4da7bc18f4..95f3c5d6ba 100644 --- a/dipy/workflows/tests/test_tracking.py +++ b/dipy/workflows/tests/test_tracking.py @@ -14,7 +14,7 @@ PFTrackingPAMFlow) -def test_particule_filtering_traking_workflows(): +def test_particle_filtering_tracking_workflows(): with TemporaryDirectory() as out_dir: dwi_path, bval_path, bvec_path = get_fnames('small_64D') vol_img = nib.load(dwi_path) @@ -170,7 +170,7 @@ def test_local_fiber_tracking_workflow(): lf_track_pam.last_generated_outputs['out_tractogram'] assert_false(is_tractogram_empty(tractogram_path)) - # Test tracking with pam with sh and closestpeaks getter + # Test tracking with pam with sh and closest peaks getter lf_track_pam = LocalFiberTrackingPAMFlow() lf_track_pam._force_overwrite = True lf_track_pam.run(pam_path, gfa_path, seeds_path, @@ -223,4 +223,4 @@ def seeds_are_same_space_as_streamlines(tractogram_path): if __name__ == '__main__': test_local_fiber_tracking_workflow() - test_particule_filtering_traking_workflows() + test_particle_filtering_tracking_workflows() diff --git a/dipy/workflows/workflow.py b/dipy/workflows/workflow.py index 6afbcffbad..da4aab11e9 100644 --- a/dipy/workflows/workflow.py +++ b/dipy/workflows/workflow.py @@ -89,7 +89,7 @@ def run(self, *args, **kwargs): """Execute the workflow. Since this is an abstract class, raise exception if this code is - reached (not impletemented in child class or literally called on this + reached (not implemented in child class or literally called on this class) """ raise Exception('Error: {} does not have a run method.'. diff --git a/doc/examples/reconst_qtdmri.py b/doc/examples/reconst_qtdmri.py index 267608c941..067c263820 100644 --- a/doc/examples/reconst_qtdmri.py +++ b/doc/examples/reconst_qtdmri.py @@ -407,7 +407,7 @@ def plot_mean_with_std(ax, time, ind1, plotcolor, ls='-', std_mult=1, also allows for the estimation of time-dependent ODFs. Once the Qtdmri model is fitted it can be simply called by qtdmri_fit.odf(sphere, s=sharpening_factor).
This is identical to how the mapmri module functions, -and allows to study the time-dependence of ODF directionallity. +and allows one to study the time-dependence of ODF directionality. This concludes the example on qt-dMRI. As we showed, approaches such as qt-dMRI can help in studying the (finite-:math:`\tau`) temporal properties of diffusion diff --git a/doc/examples/simulate_dki.py b/doc/examples/simulate_dki.py index 435090d0f1..211d1ef6d4 100644 --- a/doc/examples/simulate_dki.py +++ b/doc/examples/simulate_dki.py @@ -36,7 +36,7 @@ """ DKI requires data from more than one non-zero b-value. Since the dataset -``small_64D`` was acquired with one non-zero bvalue we artificialy produce a +``small_64D`` was acquired with one non-zero b-value, we artificially produce a second non-zero b-value. """ @@ -80,7 +80,7 @@ of each fiber population and the water fraction of each different medium """ -fie = 0.49 # intra axonal water fraction +fie = 0.49 # intra-axonal water fraction fractions = [fie*50, (1 - fie)*50, fie*50, (1 - fie)*50] """ diff --git a/doc/tools/docgen_cmd.py b/doc/tools/docgen_cmd.py index c15f5f086c..6158167f47 100755 --- a/doc/tools/docgen_cmd.py +++ b/doc/tools/docgen_cmd.py @@ -117,7 +117,7 @@ def get_help_string(class_obj): f.lower().startswith("dipy_")] workflow_desc = {} - # We get all workflows class obj in a dictionnary + # We get all workflow class objects in a dictionary for path_file in os.listdir(pjoin('..', 'dipy', 'workflows')): module_name = inspect.getmodulename(path_file) if module_name is None: