Fix typos with codespell
tritemio committed May 8, 2018
1 parent 0f8a9c0 commit f807708
Showing 15 changed files with 66 additions and 50 deletions.
2 changes: 1 addition & 1 deletion fretbursts/bg_cache.py
@@ -13,7 +13,7 @@
Background caching only works when `bg_fun = bg.exp_fit` (MLE tail fit) and
assumes that `bg_ph_sel == Ph_sel('all')`.
-Background estimation results are indentified by the `Data.calc_bg` arguments::
+Background estimation results are identified by the `Data.calc_bg` arguments::
time_s, tail_min_us, F_bg, error_metrics, fit_allph
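As a usage sketch (the exact call below is an assumption based on the argument names listed above and the standard FRETBursts API; `d` is an assumed `Data` object):

    from fretbursts import bg
    # Hypothetical values; bg.exp_fit is the MLE tail fit mentioned above.
    d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7)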
4 changes: 2 additions & 2 deletions fretbursts/burst_plot.py
@@ -772,7 +772,7 @@ def time_ph(d, i=0, num_ph=1e4, ph_istart=0):
#
def _bins_array(bins):
"""When `bins` is a 3-element sequence returns an array of bin edges.
-Otherwise returns the `bins` unchaged.
+Otherwise returns the `bins` unchanged.
"""
if np.size(bins) == 3:
bins = np.arange(*bins)
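For example (a minimal illustration of the rule above):

    import numpy as np
    np.size((0, 1, 0.25)) == 3   # True, so the tuple is treated as (start, stop, step)
    np.arange(0, 1, 0.25)        # -> array([0.  , 0.25, 0.5 , 0.75]), the bin edges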
@@ -1489,7 +1489,7 @@ def hist_interphoton_single(d, i=0, binwidth=1e-4, tmax=None, bins=None,
_plot_status['hist_interphoton_single'] = {'autoscale': False}
plt.xlabel('Inter-photon delays (%s)' % xunit.replace('us', 'μs'))
plt.ylabel('# Delays')
-# Return interal variables so that other functions can extend the plot
+# Return internal variables so that other functions can extend the plot
return dict(counts=counts, n_trim=n_trim, plot_style_=plot_style_,
t_ax=t_ax, scalex=scalex)

14 changes: 7 additions & 7 deletions fretbursts/burstlib.py
@@ -105,7 +105,7 @@ def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""Get the fraction of period between two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
@@ -377,9 +377,9 @@ def b_fuse(bursts, ms=0, clk_p=12.5e-9):
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
-overlaping = fused_bursts1.istop >= fused_bursts2.istart
-gap[overlaping] = 0
-gap_counts[overlaping] = 0
+overlapping = fused_bursts1.istop >= fused_bursts2.istart
+gap[overlapping] = 0
+gap_counts[overlapping] = 0

# Assign the new burst data
# fused_bursts1 has alredy the right start and istart
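To illustrate the gap bookkeeping above with concrete numbers (a standalone numpy sketch, not FRETBursts code; the arrays are hypothetical clock ticks and photon indices):

    import numpy as np
    stop1 = np.array([120, 200])       # stop of the first burst in each fused pair
    start2 = np.array([100, 205])      # start of the second burst
    istop1 = np.array([12, 18])        # index of the last photon of the first burst
    istart2 = np.array([10, 21])       # index of the first photon of the second burst
    gap = start2 - stop1               # [-20, 5]
    gap_counts = istart2 - istop1 - 1  # [-3, 2]
    overlapping = istop1 >= istart2    # [True, False]
    gap[overlapping] = 0               # -> [0, 5]
    gap_counts[overlapping] = 0        # -> [0, 2]

When the two bursts overlap, both the time gap and the photon-count gap are clamped to zero rather than left negative.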
@@ -475,7 +475,7 @@ def mask_empty(mask):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
-# Bolean array
+# Boolean array
return not mask.any()


@@ -2322,7 +2322,7 @@ def select_bursts(self, filter_fun, negate=False, computefret=True,
arbitrary selection rules.
Arguments:
-filter_fun (fuction): function used for burst selection
+filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
@@ -2360,7 +2360,7 @@ def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
-filter_fun (fuction): function used for burst selection
+filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str: if True return, for each channel, a tuple with
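A hedged usage sketch for the selection API shown above (`d` is an assumed `Data` object with a completed burst search; `select_bursts.size` is the standard size-based selection function shipped with FRETBursts):

    from fretbursts import select_bursts
    ds = d.select_bursts(select_bursts.size, th1=30)                      # bursts with at least ~30 photons
    ds_small = d.select_bursts(select_bursts.size, th1=30, negate=True)   # the complementary selection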
6 changes: 3 additions & 3 deletions fretbursts/burstlib_ext.py
@@ -401,15 +401,15 @@ def _burst_data_ich(dx, ich, include_bg=False, include_ph_index=False):
def burst_photons(dx, skip_ch=None):
"""Return a table (`pd.DataFrame`) of photon data for bursts in `dx`.
-The returned DataFrame has a hierachical index made of two integers:
+The returned DataFrame has a hierarchical index made of two integers:
(burst_id, photon_id). `burst_id` identifies the burst
while `photon_id` identifies each photon in a burst.
`burst_id` is the same number used in as index in the `DataFrame`
returned by :func:`burst_data`.
`photon_id` always starts at 0 for the first photon in each burst.
The columns include:
-- *timstamp*: the timestamp of each photon
+- *timestamp*: the timestamp of each photon
- *nantotime*: the TCSPC nanotime of each photon (if available)
- *stream*: a categorical column indicating the stream of each photon.
- *spot*: (multispot only) the spot number for each photon
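A minimal sketch of consuming the returned table (assuming a `Data` object `d` with bursts already searched; the `bext` alias for `burstlib_ext` is the usual convention):

    from fretbursts import bext
    ph_table = bext.burst_photons(d)
    ph_table.loc[0]        # all photons of burst 0 (first level of the (burst_id, photon_id) MultiIndex)
    ph_table.loc[(0, 0)]   # the first photon of burst 0: timestamp, stream, ...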
@@ -741,7 +741,7 @@ def join_data(d_list, gap=0):
Returns:
A `Data` object containing bursts from the all the objects in `d_list`.
This object will not contain timestamps, therefore it is possible
-to perform burst selections but not a new burst serach.
+to perform burst selections but not a new burst search.
Example:
If `d1` and `d2` are two measurements to concatenate::
16 changes: 8 additions & 8 deletions fretbursts/dataload/pytables_array_list.py
@@ -51,23 +51,23 @@ def __init__(self, file, overwrite=False, parent_node='/',
self.prefix = prefix
self.compression = compression

-## Retrive the file reference file
+# Retrieve the file reference file
if type(file) is tables.file.File:
self.data_file = file
elif os.path.exists(file) and not overwrite:
-self.data_file = tables.open_file(file, mode = "a")
+self.data_file = tables.open_file(file, mode="a")
else:
-self.data_file = tables.open_file(file, mode = "w",
-title = "Container for lists of arrays")
+self.data_file = tables.open_file(
+file, mode="w", title="Container for lists of arrays")

-## Create the group if not existent
+# Create the group if not existent
if group_name not in self.data_file.get_node(parent_node):
self.data_file.create_group(parent_node, group_name,
title=group_descr)
self.group = self.data_file.get_node(parent_node, group_name)

if 'size' in self.group._v_attrs:
-## If the group was already present read the data
+# If the group was already present read the data
self.size = self.group._v_attrs.size
self.prefix = self.group._v_attrs.prefix
for i in range(self.group._v_attrs.size):
@@ -76,7 +76,7 @@ def __init__(self, file, overwrite=False, parent_node='/',
array_ = array_[:]
super(PyTablesList, self).append(array_)
else:
-## If a new group save some metadata
+# If a new group save some metadata
self.group._v_attrs.size = self.size
self.group._v_attrs.prefix = self.prefix
self.group._v_attrs.load_array = self.load_array
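A hedged sketch of how this list-of-arrays container might be used (only `file` and `overwrite` are taken from the constructor above; the file name and printed output are hypothetical):

    import numpy as np
    alist = PyTablesList('arrays.h5', overwrite=True)   # creates the HDF5 file and group
    alist.append(np.arange(5))
    alist.append(np.zeros(3))
    print(alist.get_array_list())   # [array([0, 1, 2, 3, 4]), array([0., 0., 0.])]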
@@ -98,4 +98,4 @@ def append(self, ndarray):
self.group._v_attrs.size = self.size

def get_array_list(self):
-return [array_[:] for array_ in self]
+return [array_[:] for array_ in self]
19 changes: 14 additions & 5 deletions fretbursts/exptools.py
@@ -67,21 +67,22 @@ def estimate_tau(sample, median=False, weights=None):
Arguments:
sample (array): the exponetially-distributed samples
-median (bool): if False thes mean estimator is mean(sample). If True
+median (bool): if False the mean estimator is mean(sample). If True
uses median(samples)/ln(2) for mean esitmator (more robust).
weights (array or None): optional array of sample weights.
Returns:
An estimation of the `tau` parameter (the mean, inverse of rate).
"""
-if median == False:
+if median is False:
fitted_tau = np.average(sample, weights=weights)
else:
fitted_tau = weighted_median(sample, weights)/np.log(2)
if weights is None:
assert np.allclose(fitted_tau*np.log(2), np.median(sample))
return fitted_tau
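A quick check of the two estimators on synthetic data (standalone numpy sketch; for an exponential the median equals tau*ln(2), hence the division by ln(2)):

    import numpy as np
    rng = np.random.default_rng(0)
    sample = rng.exponential(scale=2.0, size=10_000)   # true tau = 2
    tau_mean = np.average(sample)                      # mean estimator
    tau_median = np.median(sample) / np.log(2)         # median-based estimator, more outlier-robust
    # Both values should land close to 2.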


def tail_mean(sample, threshold=0, weights=None, median=False,
return_ci=False):
"""Estimate `tau` and num. samples of the exponetial tail of `samples`.
@@ -111,9 +111,11 @@ def tail_mean(sample, threshold=0, weights=None, median=False,
else:
return mean_, num_samples


def select_tail(sample_tot, threshold):
return sample_tot[sample_tot >= threshold] - threshold
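Subtracting the threshold works because the exponential is memoryless: the portion of the sample above the threshold, shifted back to zero, is again exponential with the same tau, so the estimators above can be reused on the tail. A tiny example:

    import numpy as np
    sample_tot = np.array([0.1, 0.5, 1.2, 3.4])
    sample_tot[sample_tot >= 0.5] - 0.5   # -> array([0. , 0.7, 2.9])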


def zeta_values(sorted_sample, median=False):
assert (sorted_sample >= 0).all()
fitted_tau = estimate_tau(sorted_sample, median=median)
@@ -130,6 +133,7 @@ def kolgomorv_stat(zeta):
D = max(Dplus, Dminus)
return D


def kolgomorv_stat_n(zeta):
D = kolgomorv_stat(zeta)
n = zeta.size
@@ -142,9 +146,10 @@ def cramervonmises_stat(zeta):
n = zeta.size
i = np.arange(1, n + 1)
term2 = (2*i - 1)/(2*n)
-W2 = np.sum((zeta - term2)**2) + 1/(12*n)
+W2 = np.sum((zeta - term2)**2) + 1/(12*n)
return W2
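For reference, this is the Cramér–von Mises statistic W2 = sum_i (z_i - (2i - 1)/(2n))**2 + 1/(12n), computed on the ordered, probability-integral-transformed samples. A standalone sketch, assuming the transform z_i = 1 - exp(-x_i/tau_hat) (the usual choice for an exponential null; `zeta_values` above is truncated in this view, so the exact transform is an assumption):

    import numpy as np

    def cvm_stat_exponential(sample):
        x = np.sort(sample)
        zeta = 1 - np.exp(-x / x.mean())   # assumed probability-integral transform
        n = zeta.size
        i = np.arange(1, n + 1)
        return np.sum((zeta - (2*i - 1)/(2*n))**2) + 1/(12*n)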


def cramervonmises_stat_n(zeta):
W2 = cramervonmises_stat(zeta)
n = zeta.size
@@ -158,6 +163,7 @@ def watson_stat(zeta):
U2 = W2 - n*(np.mean(zeta) - 0.5)**2
return U2


def watson_stat_n(zeta):
U2 = watson_stat(zeta)
n = zeta.size
@@ -174,6 +180,7 @@ def andersondarling_stat(zeta):
A2 = -np.sum(f1*(log1 + log2))/n - n
return A2


def andersondarling_stat_n(zeta):
A2 = andersondarling_stat(zeta)
n = zeta.size
@@ -187,7 +194,7 @@ def exp_test_stat(sample, threshold, median=False, metric='KS',
Arguments:
sample (array): supposedly exponential distributed samples.
-threshold (float): theshold used to leselt the sample "tail",
+threshold (float): threshold used to leselt the sample "tail",
i.e. `sample[sample > threshold]`.
median (bool): if False, estimate the sample mean using the emirical
mean. If True, the sample mean is estimated using the median.
@@ -231,12 +238,14 @@ def exp_tail_stats(sample, thresholds, metric, asymptotic, median,
mean_fit[idx], num_samples[idx], mean_ci[idx] = \
tail_mean(sample, threshold=th,
median=median, return_ci=True)
-if num_samples[idx] == 0: break
+if num_samples[idx] == 0:
+    break
stats[idx] = exp_test_stat(sample,
threshold=th, metric=metric,
median=median, asymptotic=asymptotic)
return mean_fit, mean_ci, num_samples, stats


def exp_dist_amplitude(meantau_th, meantau_th_ci, num_samples_th,
thresholds, mean_fitrange):
"""Compute total exponential distribution parameters from the tail.
4 changes: 2 additions & 2 deletions fretbursts/fit/gaussian_fitting.py
@@ -222,7 +222,7 @@ def two_gaussian_fit_curve(x, y, p0, return_all=False, verbose=False, **kwargs):
"""Fit a 2-gaussian mixture to the (x,y) curve.
`kwargs` are passed to the leastsq() function.
-If return_all=False then return only the fitted paramaters
+If return_all=False then return only the fitted parameters
If return_all=True then the full output of leastsq is returned.
"""
if kwargs['method'] == 'leastsq':
@@ -264,7 +264,7 @@ def two_gaussian_fit_KDE_curve(s, p0=[0, 0.1, 0.6, 0.1, 0.5], weights=None,
x_pdf (array): array on which the KDE PDF is evaluated and curve-fitted
weights (array): optional weigths, same size as `s` (for ex.
1/sigma^2 ~ nt).
-debug (bool): if True perfoms more tests and print more info.
+debug (bool): if True performs more tests and print more info.
Additional kwargs are passed to scipy.stats.gaussian_kde().
2 changes: 1 addition & 1 deletion fretbursts/fret_fit.py
@@ -210,7 +210,7 @@ def get_weights(nd, na, weights, naa=0, gamma=1., widths=None):
weights = nt - nt.min() + 1
elif weights == 'size2': # weight = (burst size)^2
weights = nt**2
-elif weights == 'sqrt': # weigth = sqrt(burst size)
+elif weights == 'sqrt': # weight = sqrt(burst size)
weights = np.sqrt(nt)
elif weights == 'inv_size': # weight = 1/(burst size)
weights = 1./(nt)
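A small numeric illustration of the weighting schemes listed above (standalone sketch; `nt` plays the role of the total burst size):

    import numpy as np
    nt = np.array([20., 45., 80.])
    nt**2          # 'size2'    -> strongly favors large bursts
    np.sqrt(nt)    # 'sqrt'     -> compresses the dynamic range
    1. / nt        # 'inv_size' -> down-weights large bursts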
20 changes: 14 additions & 6 deletions fretbursts/fretmath.py
@@ -23,7 +23,7 @@
# Comple corrections
#
def correct_E_gamma_leak_dir(Eraw, gamma=1, leakage=0, dir_ex_t=0):
"""Compute corrected FRET efficency from proximity ratio `Eraw`.
"""Compute corrected FRET efficiency from proximity ratio `Eraw`.
For the inverse function see :func:`uncorrect_E_gamma_leak_dir`.
@@ -42,8 +42,9 @@ def correct_E_gamma_leak_dir(Eraw, gamma=1, leakage=0, dir_ex_t=0):
"""
if isinstance(Eraw, (list, tuple)):
Eraw = np.asarray(Eraw)
-return (Eraw*(leakage + dir_ex_t*gamma + 1) - leakage - dir_ex_t*gamma) \
-/ (Eraw*(leakage - gamma + 1) - leakage + gamma)
+return ((Eraw*(leakage + dir_ex_t*gamma + 1) - leakage - dir_ex_t*gamma)
+/ (Eraw*(leakage - gamma + 1) - leakage + gamma))


def uncorrect_E_gamma_leak_dir(E, gamma=1, leakage=0, dir_ex_t=0):
"""Compute proximity ratio from corrected FRET efficiency `E`.
@@ -64,8 +65,8 @@ def uncorrect_E_gamma_leak_dir(E, gamma=1, leakage=0, dir_ex_t=0):
"""
if isinstance(E, (list, tuple)):
E = np.asarray(E)
-return (E*(gamma - leakage) + leakage + dir_ex_t*gamma) \
-/ (E*(gamma - leakage - 1) + leakage + dir_ex_t*gamma + 1)
+return ((E*(gamma - leakage) + leakage + dir_ex_t*gamma)
+/ (E*(gamma - leakage - 1) + leakage + dir_ex_t*gamma + 1))
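Since `uncorrect_E_gamma_leak_dir` is documented as the inverse of `correct_E_gamma_leak_dir`, a round trip makes an easy sanity check (sketch; the correction coefficients are hypothetical):

    import numpy as np
    Eraw = np.linspace(0, 1, 11)
    kw = dict(gamma=0.8, leakage=0.1, dir_ex_t=0.05)
    E = correct_E_gamma_leak_dir(Eraw, **kw)
    assert np.allclose(uncorrect_E_gamma_leak_dir(E, **kw), Eraw)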


##
@@ -86,6 +87,7 @@ def gamma_correct_E(Eraw, gamma):
Eraw = np.asarray(Eraw)
return Eraw / (gamma - gamma*Eraw + Eraw)
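Note that with gamma = 1 the expression reduces to the identity, Eraw / (1 - Eraw + Eraw) = Eraw; for example:

    import numpy as np
    Eraw = np.array([0.2, 0.5, 0.8])
    gamma_correct_E(Eraw, gamma=1.0)   # -> array([0.2, 0.5, 0.8]), unchanged
    gamma_correct_E(Eraw, gamma=0.5)   # -> array([0.333..., 0.666..., 0.888...]), shifted up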


def gamma_uncorrect_E(E, gamma):
"""Reverse gamma correction and return uncorrected FRET.
Expand All @@ -105,6 +107,7 @@ def leakage_correct_E(Eraw, leakage):
Eraw = np.asarray(Eraw)
return (Eraw*(leakage + 1) - leakage) / (Eraw*leakage - leakage + 1)


def leakage_uncorrect_E(E, leakage):
"""Reverse leakage correction and return uncorrected FRET.
@@ -129,6 +132,7 @@ def dir_ex_correct_E(Eraw, dir_ex_t):
Eraw = np.asarray(Eraw)
return Eraw*(dir_ex_t + 1) - dir_ex_t


def dir_ex_uncorrect_E(E, dir_ex_t):
"""Reverse direct excitation correction and return uncorrected FRET.
@@ -138,6 +142,7 @@ def dir_ex_uncorrect_E(E, dir_ex_t):
E = np.asarray(E)
return (E + dir_ex_t) / (dir_ex_t + 1)


def correct_S(Eraw, Sraw, gamma, leakage, dir_ex_t):
"""Correct S values for gamma, leakage and direct excitation.
@@ -150,7 +155,7 @@ def correct_S(Eraw, Sraw, gamma, leakage, dir_ex_t):
leakage (float): donor emission leakage into the acceptor channel.
dir_ex_t (float): direct acceptor excitation by donor laser.
Defined as ``n_dir = dir_ex_t * (na + g nd)``. The dir_ex_t
-coefficient is the ratio between D and A absorbtion cross-sections
+coefficient is the ratio between D and A absorption cross-sections
at the donor-excitation wavelength.
Returns
@@ -164,12 +169,14 @@ def correct_S(Eraw, Sraw, gamma, leakage, dir_ex_t):
(Eraw*leakage*Sraw - Eraw*Sraw*gamma + Eraw*Sraw - leakage*Sraw -
Sraw*dir_ex_t + Sraw*gamma - Sraw + dir_ex_t + 1))


def uncorrect_S(E_R, S, gamma, L_k, d_dirT):
"""Function used to test :func:`correct_S`."""
return (S*(d_dirT + 1) /
(-E_R*L_k*S + E_R*L_k + E_R*S*gamma - E_R*S - E_R*gamma +
E_R + L_k*S - L_k + S*d_dirT - S*gamma + S + gamma))


def test_fretmath():
"""Run a few consistency checks for the correction functions.
"""
@@ -247,6 +254,7 @@ def test_fretmath():
assert np.allclose(S_uncorr, Sx)
assert (S_corr.min() > -0.5) and (S_corr.max() < 1.5)


if __name__ == '__main__':
test_fretmath()
print('All tests passed.')
2 changes: 1 addition & 1 deletion fretbursts/legacy_mode.py
@@ -31,7 +31,7 @@
The old behaviour was loading a big chunk of matplotlib.pyplot and
a lot of functions from burstlib.
-Now the prefered way is to use always `plt.` to access the matplotlib.pyplot
+Now the preferred way is to use always `plt.` to access the matplotlib.pyplot
functions and `bl.` to access any function in `burstlib`. This change makes
maintenance, unit testing and installation easier.
